diff --git a/.gitignore b/.gitignore index c6512e64a4e..344b31f7ac1 100644 --- a/.gitignore +++ b/.gitignore @@ -30,6 +30,7 @@ _testmain.go /misc/cgo/testso/main /pkg/ /src/*.*/ +/src/_artifacts/ /src/cmd/cgo/zdefaultcc.go /src/cmd/dist/dist /src/cmd/go/internal/cfg/zdefaultcc.go diff --git a/api/next/68021.txt b/api/next/68021.txt new file mode 100644 index 00000000000..46156e06654 --- /dev/null +++ b/api/next/68021.txt @@ -0,0 +1,13 @@ +pkg go/ast, func ParseDirective(token.Pos, string) (Directive, bool) #68021 +pkg go/ast, method (*Directive) End() token.Pos #68021 +pkg go/ast, method (*Directive) ParseArgs() ([]DirectiveArg, error) #68021 +pkg go/ast, method (*Directive) Pos() token.Pos #68021 +pkg go/ast, type Directive struct #68021 +pkg go/ast, type Directive struct, Args string #68021 +pkg go/ast, type Directive struct, ArgsPos token.Pos #68021 +pkg go/ast, type Directive struct, Name string #68021 +pkg go/ast, type Directive struct, Slash token.Pos #68021 +pkg go/ast, type Directive struct, Tool string #68021 +pkg go/ast, type DirectiveArg struct #68021 +pkg go/ast, type DirectiveArg struct, Arg string #68021 +pkg go/ast, type DirectiveArg struct, Pos token.Pos #68021 diff --git a/api/next/71287.txt b/api/next/71287.txt new file mode 100644 index 00000000000..c1e09a1f523 --- /dev/null +++ b/api/next/71287.txt @@ -0,0 +1,4 @@ +pkg testing, method (*B) ArtifactDir() string #71287 +pkg testing, method (*F) ArtifactDir() string #71287 +pkg testing, method (*T) ArtifactDir() string #71287 +pkg testing, type TB interface, ArtifactDir() string #71287 diff --git a/api/next/73794.txt b/api/next/73794.txt new file mode 100644 index 00000000000..4018c149ecb --- /dev/null +++ b/api/next/73794.txt @@ -0,0 +1 @@ +pkg bytes, method (*Buffer) Peek(int) ([]uint8, error) #73794 diff --git a/doc/go_spec.html b/doc/go_spec.html index 92afe1cee0b..a2f22e31dbf 100644 --- a/doc/go_spec.html +++ b/doc/go_spec.html @@ -1,6 +1,6 @@ @@ -2686,22 +2686,6 @@ of a method declaration associated with a generic type.

-

-Within a type parameter list of a generic type T, a type constraint -may not (directly, or indirectly through the type parameter list of another -generic type) refer to T. -

- -
-type T1[P T1[P]] …                    // illegal: T1 refers to itself
-type T2[P interface{ T2[int] }] …     // illegal: T2 refers to itself
-type T3[P interface{ m(T3[int])}] …   // illegal: T3 refers to itself
-type T4[P T5[P]] …                    // illegal: T4 refers to T5 and
-type T5[P T4[P]] …                    //          T5 refers to T4
-
-type T6[P int] struct{ f *T6[P] }     // ok: reference to T6 is not in type parameter list
-
-

Type constraints

@@ -3173,7 +3157,7 @@ Element = Expression | LiteralValue .

Unless the LiteralType is a type parameter, -its underlying type +its underlying type must be a struct, array, slice, or map type (the syntax enforces this constraint except when the type is given as a TypeName). @@ -4873,7 +4857,7 @@ For instance, x / y * z is the same as (x / y) * z. x <= f() // x <= f() ^a >> b // (^a) >> b f() || g() // f() || g() -x == y+1 && <-chanInt > 0 // (x == (y+1)) && ((<-chanInt) > 0) +x == y+1 && <-chanInt > 0 // (x == (y+1)) && ((<-chanInt) > 0) @@ -6635,7 +6619,7 @@ iteration's variable at that moment.

 var prints []func()
-for i := 0; i < 5; i++ {
+for i := 0; i < 5; i++ {
 	prints = append(prints, func() { println(i) })
 	i++
 }
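Completing the spec example above with a driver loop, to make the per-iteration variable semantics concrete (the closures print 1, 3, 5 because each captures the i of its own iteration; with a single shared variable they would all print its final value):

```go
package main

func main() {
	var prints []func()
	for i := 0; i < 5; i++ {
		prints = append(prints, func() { println(i) })
		i++
	}
	for _, p := range prints {
		p() // prints 1, then 3, then 5
	}
}
```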
@@ -6772,7 +6756,7 @@ if the iteration variable is preexisting, the type of the iteration values is th
 variable, which must be of integer type.
 Otherwise, if the iteration variable is declared by the "range" clause or is absent,
 the type of the iteration values is the default type for n.
-If n <= 0, the loop does not run any iterations.
+If n <= 0, the loop does not run any iterations.
 
 
 
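A quick illustration of the range-over-integer rule restated in the hunk above (plain Go 1.22+; the hunk itself only touches markup):

```go
package main

import "fmt"

func main() {
	// The iteration values run from 0 through n-1.
	for i := range 3 {
		fmt.Println(i) // prints 0, 1, 2
	}
	// If n <= 0, the loop does not run any iterations.
	for i := range 0 {
		fmt.Println(i) // never reached
	}
}
```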
  • @@ -7383,8 +7367,8 @@ The values x are passed to a parameter of type ...E where E is the element type of S and the respective parameter passing rules apply. -As a special case, append also accepts a first argument assignable -to type []byte with a second argument of string type followed by +As a special case, append also accepts a slice whose type is assignable to +type []byte with a second argument of string type followed by .... This form appends the bytes of the string.

    @@ -7799,7 +7783,7 @@ compared lexically byte-wise:

    -min(x, y)    == if x <= y then x else y
    +min(x, y)    == if x <= y then x else y
     min(x, y, z) == min(min(x, y), z)
     
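To ground the two clauses edited above (the append special case for []byte plus string, and the min identity), a small self-contained sketch using only standard Go:

```go
package main

import "fmt"

func main() {
	// Special case: a string may follow a []byte-typed first argument,
	// and append copies the bytes of the string.
	b := []byte("hello, ")
	b = append(b, "world"...)
	fmt.Println(string(b)) // hello, world

	// min(x, y) == if x <= y then x else y, and
	// min(x, y, z) == min(min(x, y), z).
	fmt.Println(min(3, 1, 2)) // 1
}
```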
diff --git a/doc/godebug.md b/doc/godebug.md
index aaa0f9dd55e..d9ae462b980 100644
--- a/doc/godebug.md
+++ b/doc/godebug.md
@@ -153,6 +153,21 @@ for example, see the
[runtime documentation](/pkg/runtime#hdr-Environment_Variables) and the [go command documentation](/cmd/go#hdr-Build_and_test_caching).
+### Go 1.26
+
+Go 1.26 added a new `httpcookiemaxnum` setting that controls the maximum number
+of cookies that net/http will accept when parsing HTTP headers. If the number of
+cookies in a header exceeds the number set in `httpcookiemaxnum`, cookie parsing
+will fail early. The default value is `httpcookiemaxnum=3000`. Setting
+`httpcookiemaxnum=0` will allow cookie parsing to accept an indefinite
+number of cookies. To avoid denial of service attacks, this setting and default
+were backported to Go 1.25.2 and Go 1.24.8.
+
+Go 1.26 added a new `urlstrictcolons` setting that controls whether `net/url.Parse`
+allows malformed hostnames containing colons outside of a bracketed IPv6 address.
+The default `urlstrictcolons=1` rejects URLs such as `http://localhost:1:2` or `http://::1/`.
+Colons are permitted as part of a bracketed IPv6 address, such as `http://[::1]/`.
+
 ### Go 1.25
 
 Go 1.25 added a new `decoratemappings` setting that controls whether the Go
diff --git a/doc/next/2-language.md b/doc/next/2-language.md
index ded7becf014..71da62f59e5 100644
--- a/doc/next/2-language.md
+++ b/doc/next/2-language.md
@@ -19,10 +19,14 @@ type Person struct {
 	Age  *int `json:"age"` // age if known; nil otherwise
 }
 
-func personJSON(name string, age int) ([]byte, error) {
+func personJSON(name string, born time.Time) ([]byte, error) {
 	return json.Marshal(Person{
 		Name: name,
-		Age:  new(age),
+		Age:  new(yearsSince(born)),
 	})
 }
+
+func yearsSince(t time.Time) int {
+	return int(time.Since(t).Hours() / (365.25 * 24)) // approximately
+}
 ```
diff --git a/doc/next/3-tools.md b/doc/next/3-tools.md
index 9459a5490e7..c0a4601c0b9 100644
--- a/doc/next/3-tools.md
+++ b/doc/next/3-tools.md
@@ -7,5 +7,15 @@ a replacement for `go tool doc`: it takes the same flags
 and arguments and has the same behavior.
+
+The `go fix` command, following the pattern of `go vet` in Go 1.10,
+now uses the Go analysis framework (`golang.org/x/tools/go/analysis`).
+This means the same analyzers that provide diagnostics in `go vet`
+can be used to suggest and apply fixes in `go fix`.
+The `go fix` command's historical fixers, all of which were obsolete,
+have been removed and replaced by a suite of new analyzers that
+offer fixes to use newer features of the language and library.
+
+
 ### Cgo {#cgo}
diff --git a/doc/next/5-toolchain.md b/doc/next/5-toolchain.md
index cc32f30a521..b5893288e5c 100644
--- a/doc/next/5-toolchain.md
+++ b/doc/next/5-toolchain.md
@@ -4,6 +4,10 @@
 ## Linker {#linker}
+On 64-bit ARM-based Windows (the `windows/arm64` port), the linker now supports internal
+linking mode for cgo programs, which can be requested with the
+`-ldflags=-linkmode=internal` flag.
+
 ## Bootstrap {#bootstrap}
diff --git a/doc/next/6-stdlib/99-minor/bytes/73794.md b/doc/next/6-stdlib/99-minor/bytes/73794.md
new file mode 100644
index 00000000000..a44dfc10e69
--- /dev/null
+++ b/doc/next/6-stdlib/99-minor/bytes/73794.md
@@ -0,0 +1,2 @@
+The new [Buffer.Peek] method returns the next n bytes from the buffer without
+advancing it.
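The release note above is brief; a sketch of intended use of the new method, matching the implementation added to src/bytes/buffer.go later in this diff:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	var buf bytes.Buffer
	buf.WriteString("hello")

	// Peek returns the next n bytes without consuming them.
	p, _ := buf.Peek(3)
	fmt.Println(string(p)) // hel

	// Asking for more than Len() bytes yields what is available plus io.EOF.
	p, err := buf.Peek(10)
	fmt.Println(string(p), err == io.EOF) // hello true

	// The read position is unchanged.
	fmt.Println(buf.Len()) // 5
}
```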
diff --git a/doc/next/6-stdlib/99-minor/go/ast/68021.md b/doc/next/6-stdlib/99-minor/go/ast/68021.md
new file mode 100644
index 00000000000..0ff1a0b11e8
--- /dev/null
+++ b/doc/next/6-stdlib/99-minor/go/ast/68021.md
@@ -0,0 +1,4 @@
+The new [ParseDirective] function parses [directive
+comments](/doc/comment#Syntax), which are comments such as `//go:generate`.
+Source code tools can support their own directive comments, and this new API
+should help them implement the conventional syntax.
diff --git a/doc/next/6-stdlib/99-minor/net/url/31024.md b/doc/next/6-stdlib/99-minor/net/url/31024.md
new file mode 100644
index 00000000000..11ed31e87c5
--- /dev/null
+++ b/doc/next/6-stdlib/99-minor/net/url/31024.md
@@ -0,0 +1,4 @@
+[Parse] now rejects malformed URLs containing colons in the host subcomponent,
+such as `http://::1/` or `http://localhost:80:80/`.
+URLs containing bracketed IPv6 addresses, such as `http://[::1]/`, are still accepted.
+The new GODEBUG=urlstrictcolons=0 setting restores the old behavior.
diff --git a/doc/next/6-stdlib/99-minor/testing/71287.md b/doc/next/6-stdlib/99-minor/testing/71287.md
new file mode 100644
index 00000000000..82cac638101
--- /dev/null
+++ b/doc/next/6-stdlib/99-minor/testing/71287.md
@@ -0,0 +1,18 @@
+The new methods [T.ArtifactDir], [B.ArtifactDir], and [F.ArtifactDir]
+return a directory in which to write test output files (artifacts).
+
+When the `-artifacts` flag is provided to `go test`,
+this directory will be located under the output directory
+(specified with `-outputdir`, or the current directory by default).
+Otherwise, artifacts are stored in a temporary directory
+which is removed after the test completes.
+
+The first call to `ArtifactDir` when `-artifacts` is provided
+writes the location of the directory to the test log.
+
+For example, in a test named `TestArtifacts`,
+`t.ArtifactDir()` emits:
+
+```
+=== ARTIFACTS TestArtifacts /path/to/artifact/dir
+```
diff --git a/lib/hg/goreposum.py b/lib/hg/goreposum.py
new file mode 100644
index 00000000000..1a7d7a44466
--- /dev/null
+++ b/lib/hg/goreposum.py
@@ -0,0 +1,64 @@
+# Copyright 2025 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Mercurial extension to add a 'goreposum' command that
+# computes a hash of a remote repo's tag state.
+# Tag definitions can come from the .hgtags file stored in
+# any head of any branch, and the server protocol does not
+# expose the tags directly. However, the protocol does expose
+# the hashes of all the branch heads, so we can use a hash of
+# all those branch names and heads as a conservative snapshot
+# of the entire remote repo state, and use that as the tag sum.
+# Any change on the server then invalidates the tag sum,
+# even if it didn't have anything to do with tags, but at least
+# we will avoid re-cloning a server when there have been no
+# changes at all.
+#
+# Separately, this extension also adds a 'golookup' command that
+# returns the hash of a specific reference, like 'default' or a tag.
+# And golookup of a hash confirms that it still exists on the server.
+# We can use that to revalidate that specific versions still exist and
+# have the same meaning they did the last time we checked.
+# +# Usage: +# +# hg --config "extensions.goreposum=$GOROOT/lib/hg/goreposum.py" goreposum REPOURL + +import base64, hashlib, sys +from mercurial import registrar, ui, hg, node +from mercurial.i18n import _ +cmdtable = {} +command = registrar.command(cmdtable) +@command(b'goreposum', [], _('url'), norepo=True) +def goreposum(ui, url): + """ + goreposum computes a checksum of all the named state in the remote repo. + It hashes together all the branch names and hashes + and then all the bookmark names and hashes. + Tags are stored in .hgtags files in any of the branches, + so the branch metadata includes the tags as well. + """ + h = hashlib.sha256() + peer = hg.peer(ui, {}, url) + for name, revs in peer.branchmap().items(): + h.update(name) + for r in revs: + h.update(b' ') + h.update(r) + h.update(b'\n') + if (b'bookmarks' in peer.listkeys(b'namespaces')): + for name, rev in peer.listkeys(b'bookmarks').items(): + h.update(name) + h.update(b'=') + h.update(rev) + h.update(b'\n') + print('r1:'+base64.standard_b64encode(h.digest()).decode('utf-8')) + +@command(b'golookup', [], _('url rev'), norepo=True) +def golookup(ui, url, rev): + """ + golookup looks up a single identifier in the repo, + printing its hash. + """ + print(node.hex(hg.peer(ui, {}, url).lookup(rev)).decode('utf-8')) diff --git a/src/archive/tar/common.go b/src/archive/tar/common.go index 7b3945ff153..ad31bbb64aa 100644 --- a/src/archive/tar/common.go +++ b/src/archive/tar/common.go @@ -39,6 +39,7 @@ var ( errMissData = errors.New("archive/tar: sparse file references non-existent data") errUnrefData = errors.New("archive/tar: sparse file contains unreferenced data") errWriteHole = errors.New("archive/tar: write non-NUL byte in sparse hole") + errSparseTooLong = errors.New("archive/tar: sparse map too long") ) type headerError []string diff --git a/src/archive/tar/reader.go b/src/archive/tar/reader.go index 8483fb52a28..16ac2f5b17c 100644 --- a/src/archive/tar/reader.go +++ b/src/archive/tar/reader.go @@ -531,12 +531,17 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) { cntNewline int64 buf bytes.Buffer blk block + totalSize int ) // feedTokens copies data in blocks from r into buf until there are // at least cnt newlines in buf. It will not read more blocks than needed. feedTokens := func(n int64) error { for cntNewline < n { + totalSize += len(blk) + if totalSize > maxSpecialFileSize { + return errSparseTooLong + } if _, err := mustReadFull(r, blk[:]); err != nil { return err } @@ -569,8 +574,8 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) { } // Parse for all member entries. - // numEntries is trusted after this since a potential attacker must have - // committed resources proportional to what this library used. + // numEntries is trusted after this since feedTokens limits the number of + // tokens based on maxSpecialFileSize. if err := feedTokens(2 * numEntries); err != nil { return nil, err } diff --git a/src/archive/tar/reader_test.go b/src/archive/tar/reader_test.go index 99340a30471..fca53dae741 100644 --- a/src/archive/tar/reader_test.go +++ b/src/archive/tar/reader_test.go @@ -621,6 +621,11 @@ func TestReader(t *testing.T) { }, Format: FormatPAX, }}, + }, { + // Small compressed file that uncompresses to + // a file with a very large GNU 1.0 sparse map. 
+		file: "testdata/gnu-sparse-many-zeros.tar.bz2",
+		err:  errSparseTooLong,
 	}}
 
 	for _, v := range vectors {
diff --git a/src/archive/tar/testdata/gnu-sparse-many-zeros.tar.bz2 b/src/archive/tar/testdata/gnu-sparse-many-zeros.tar.bz2
new file mode 100644
index 00000000000..751d7fd4b68
Binary files /dev/null and b/src/archive/tar/testdata/gnu-sparse-many-zeros.tar.bz2 differ
diff --git a/src/archive/zip/reader_test.go b/src/archive/zip/reader_test.go
index 410b2d037e4..cb8a0c28714 100644
--- a/src/archive/zip/reader_test.go
+++ b/src/archive/zip/reader_test.go
@@ -1213,7 +1213,6 @@ func TestFS(t *testing.T) {
 			[]string{"a/b/c"},
 		},
 	} {
-		test := test
 		t.Run(test.file, func(t *testing.T) {
 			t.Parallel()
 			z, err := OpenReader(test.file)
@@ -1247,7 +1246,6 @@ func TestFSWalk(t *testing.T) {
 			wantErr: true,
 		},
 	} {
-		test := test
 		t.Run(test.file, func(t *testing.T) {
 			t.Parallel()
 			z, err := OpenReader(test.file)
diff --git a/src/bytes/buffer.go b/src/bytes/buffer.go
index 9684513942d..3eb5b350c38 100644
--- a/src/bytes/buffer.go
+++ b/src/bytes/buffer.go
@@ -77,6 +77,18 @@ func (b *Buffer) String() string {
 	return string(b.buf[b.off:])
 }
 
+// Peek returns the next n bytes without advancing the buffer.
+// If Peek returns fewer than n bytes, it also returns [io.EOF].
+// The slice is only valid until the next call to a read or write method.
+// The slice aliases the buffer content at least until the next buffer modification,
+// so immediate changes to the slice will affect the result of future reads.
+func (b *Buffer) Peek(n int) ([]byte, error) {
+	if b.Len() < n {
+		return b.buf[b.off:], io.EOF
+	}
+	return b.buf[b.off : b.off+n], nil
+}
+
 // empty reports whether the unread portion of the buffer is empty.
 func (b *Buffer) empty() bool { return len(b.buf) <= b.off }
diff --git a/src/bytes/buffer_test.go b/src/bytes/buffer_test.go
index b46ba1204eb..5f5cc483b03 100644
--- a/src/bytes/buffer_test.go
+++ b/src/bytes/buffer_test.go
@@ -531,6 +531,34 @@ func TestReadString(t *testing.T) {
 	}
 }
 
+var peekTests = []struct {
+	buffer   string
+	n        int
+	expected string
+	err      error
+}{
+	{"", 0, "", nil},
+	{"aaa", 3, "aaa", nil},
+	{"foobar", 2, "fo", nil},
+	{"a", 2, "a", io.EOF},
+}
+
+func TestPeek(t *testing.T) {
+	for _, test := range peekTests {
+		buf := NewBufferString(test.buffer)
+		bytes, err := buf.Peek(test.n)
+		if string(bytes) != test.expected {
+			t.Errorf("expected %q, got %q", test.expected, bytes)
+		}
+		if err != test.err {
+			t.Errorf("expected error %v, got %v", test.err, err)
+		}
+		if buf.Len() != len(test.buffer) {
+			t.Errorf("bad length after peek: %d, want %d", buf.Len(), len(test.buffer))
+		}
+	}
+}
+
 func BenchmarkReadString(b *testing.B) {
 	const n = 32 << 10
diff --git a/src/bytes/bytes_test.go b/src/bytes/bytes_test.go
index f18915c879e..9547ede312f 100644
--- a/src/bytes/bytes_test.go
+++ b/src/bytes/bytes_test.go
@@ -1224,7 +1224,7 @@ func TestMap(t *testing.T) {
 	// Run a couple of awful growth/shrinkage tests
 	a := tenRunes('a')
 
-	// 1. Grow.  This triggers two reallocations in Map.
+	// 1. Grow. This triggers two reallocations in Map.
maxRune := func(r rune) rune { return unicode.MaxRune } m := Map(maxRune, []byte(a)) expect := tenRunes(unicode.MaxRune) diff --git a/src/cmd/asm/internal/arch/arch.go b/src/cmd/asm/internal/arch/arch.go index 8481a8f378f..fb9e7851110 100644 --- a/src/cmd/asm/internal/arch/arch.go +++ b/src/cmd/asm/internal/arch/arch.go @@ -92,7 +92,8 @@ func jumpX86(word string) bool { func jumpRISCV(word string) bool { switch word { case "BEQ", "BEQZ", "BGE", "BGEU", "BGEZ", "BGT", "BGTU", "BGTZ", "BLE", "BLEU", "BLEZ", - "BLT", "BLTU", "BLTZ", "BNE", "BNEZ", "CALL", "JAL", "JALR", "JMP": + "BLT", "BLTU", "BLTZ", "BNE", "BNEZ", "CALL", "CBEQZ", "CBNEZ", "CJ", "CJALR", "CJR", + "JAL", "JALR", "JMP": return true } return false diff --git a/src/cmd/asm/internal/arch/arm64.go b/src/cmd/asm/internal/arch/arm64.go index 87ccb8c0409..d562e5907d6 100644 --- a/src/cmd/asm/internal/arch/arm64.go +++ b/src/cmd/asm/internal/arch/arm64.go @@ -195,149 +195,6 @@ func ARM64RegisterShift(reg, op, count int16) (int64, error) { return int64(reg&31)<<16 | int64(op)<<22 | int64(uint16(count)), nil } -// ARM64RegisterExtension constructs an ARM64 register with extension or arrangement. -func ARM64RegisterExtension(a *obj.Addr, ext string, reg, num int16, isAmount, isIndex bool) error { - Rnum := (reg & 31) + int16(num<<5) - if isAmount { - if num < 0 || num > 7 { - return errors.New("index shift amount is out of range") - } - } - if reg <= arm64.REG_R31 && reg >= arm64.REG_R0 { - if !isAmount { - return errors.New("invalid register extension") - } - switch ext { - case "UXTB": - if a.Type == obj.TYPE_MEM { - return errors.New("invalid shift for the register offset addressing mode") - } - a.Reg = arm64.REG_UXTB + Rnum - case "UXTH": - if a.Type == obj.TYPE_MEM { - return errors.New("invalid shift for the register offset addressing mode") - } - a.Reg = arm64.REG_UXTH + Rnum - case "UXTW": - // effective address of memory is a base register value and an offset register value. 
- if a.Type == obj.TYPE_MEM { - a.Index = arm64.REG_UXTW + Rnum - } else { - a.Reg = arm64.REG_UXTW + Rnum - } - case "UXTX": - if a.Type == obj.TYPE_MEM { - return errors.New("invalid shift for the register offset addressing mode") - } - a.Reg = arm64.REG_UXTX + Rnum - case "SXTB": - if a.Type == obj.TYPE_MEM { - return errors.New("invalid shift for the register offset addressing mode") - } - a.Reg = arm64.REG_SXTB + Rnum - case "SXTH": - if a.Type == obj.TYPE_MEM { - return errors.New("invalid shift for the register offset addressing mode") - } - a.Reg = arm64.REG_SXTH + Rnum - case "SXTW": - if a.Type == obj.TYPE_MEM { - a.Index = arm64.REG_SXTW + Rnum - } else { - a.Reg = arm64.REG_SXTW + Rnum - } - case "SXTX": - if a.Type == obj.TYPE_MEM { - a.Index = arm64.REG_SXTX + Rnum - } else { - a.Reg = arm64.REG_SXTX + Rnum - } - case "LSL": - a.Index = arm64.REG_LSL + Rnum - default: - return errors.New("unsupported general register extension type: " + ext) - - } - } else if reg <= arm64.REG_V31 && reg >= arm64.REG_V0 { - switch ext { - case "B8": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_8B & 15) << 5) - case "B16": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_16B & 15) << 5) - case "H4": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_4H & 15) << 5) - case "H8": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_8H & 15) << 5) - case "S2": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_2S & 15) << 5) - case "S4": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_4S & 15) << 5) - case "D1": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_1D & 15) << 5) - case "D2": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_2D & 15) << 5) - case "Q1": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_1Q & 15) << 5) - case "B": - if !isIndex { - return nil - } - a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_B & 15) << 5) - a.Index = num - case "H": - if !isIndex { - return nil - } - a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_H & 15) << 5) - a.Index = num - case "S": - if !isIndex { - return nil - } - a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_S & 15) << 5) - a.Index = num - case "D": - if !isIndex { - return nil - } - a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_D & 15) << 5) - a.Index = num - default: - return errors.New("unsupported simd register extension type: " + ext) - } - } else { - return errors.New("invalid register and extension combination") - } - return nil -} - // ARM64RegisterArrangement constructs an ARM64 vector register arrangement. 
func ARM64RegisterArrangement(reg int16, name, arng string) (int64, error) { var curQ, curSize uint16 diff --git a/src/cmd/asm/internal/asm/asm.go b/src/cmd/asm/internal/asm/asm.go index 389307af29e..0f75edf4e5d 100644 --- a/src/cmd/asm/internal/asm/asm.go +++ b/src/cmd/asm/internal/asm/asm.go @@ -248,7 +248,7 @@ func (p *Parser) asmData(operands [][]lex.Token) { case obj.TYPE_CONST: switch sz { case 1, 2, 4, 8: - nameAddr.Sym.WriteInt(p.ctxt, nameAddr.Offset, int(sz), valueAddr.Offset) + nameAddr.Sym.WriteInt(p.ctxt, nameAddr.Offset, sz, valueAddr.Offset) default: p.errorf("bad int size for DATA argument: %d", sz) } @@ -262,10 +262,10 @@ func (p *Parser) asmData(operands [][]lex.Token) { p.errorf("bad float size for DATA argument: %d", sz) } case obj.TYPE_SCONST: - nameAddr.Sym.WriteString(p.ctxt, nameAddr.Offset, int(sz), valueAddr.Val.(string)) + nameAddr.Sym.WriteString(p.ctxt, nameAddr.Offset, sz, valueAddr.Val.(string)) case obj.TYPE_ADDR: if sz == p.arch.PtrSize { - nameAddr.Sym.WriteAddr(p.ctxt, nameAddr.Offset, int(sz), valueAddr.Sym, valueAddr.Offset) + nameAddr.Sym.WriteAddr(p.ctxt, nameAddr.Offset, sz, valueAddr.Sym, valueAddr.Offset) } else { p.errorf("bad addr size for DATA argument: %d", sz) } diff --git a/src/cmd/asm/internal/asm/endtoend_test.go b/src/cmd/asm/internal/asm/endtoend_test.go index afaf02815f9..e53263356d1 100644 --- a/src/cmd/asm/internal/asm/endtoend_test.go +++ b/src/cmd/asm/internal/asm/endtoend_test.go @@ -38,7 +38,7 @@ func testEndToEnd(t *testing.T, goarch, file string) { ctxt.IsAsm = true defer ctxt.Bso.Flush() failed := false - ctxt.DiagFunc = func(format string, args ...interface{}) { + ctxt.DiagFunc = func(format string, args ...any) { failed = true t.Errorf(format, args...) } @@ -193,7 +193,7 @@ Diff: top := pList.Firstpc var text *obj.LSym ok = true - ctxt.DiagFunc = func(format string, args ...interface{}) { + ctxt.DiagFunc = func(format string, args ...any) { t.Errorf(format, args...) ok = false } @@ -294,7 +294,7 @@ func testErrors(t *testing.T, goarch, file string, flags ...string) { failed := false var errBuf bytes.Buffer parser.errorWriter = &errBuf - ctxt.DiagFunc = func(format string, args ...interface{}) { + ctxt.DiagFunc = func(format string, args ...any) { failed = true s := fmt.Sprintf(format, args...) if !strings.HasSuffix(s, "\n") { @@ -467,6 +467,7 @@ func TestLOONG64Encoder(t *testing.T) { testEndToEnd(t, "loong64", "loong64enc3") testEndToEnd(t, "loong64", "loong64enc4") testEndToEnd(t, "loong64", "loong64enc5") + testEndToEnd(t, "loong64", "loong64enc6") testEndToEnd(t, "loong64", "loong64") } diff --git a/src/cmd/asm/internal/asm/parse.go b/src/cmd/asm/internal/asm/parse.go index 8f8f6dcc346..25d596f4d66 100644 --- a/src/cmd/asm/internal/asm/parse.go +++ b/src/cmd/asm/internal/asm/parse.go @@ -78,7 +78,7 @@ func NewParser(ctxt *obj.Link, ar *arch.Arch, lexer lex.TokenReader) *Parser { // and turn it into a recoverable panic. var panicOnError bool -func (p *Parser) errorf(format string, args ...interface{}) { +func (p *Parser) errorf(format string, args ...any) { if panicOnError { panic(fmt.Errorf(format, args...)) } @@ -90,7 +90,7 @@ func (p *Parser) errorf(format string, args ...interface{}) { if p.lex != nil { // Put file and line information on head of message. format = "%s:%d: " + format + "\n" - args = append([]interface{}{p.lex.File(), p.lineNum}, args...) + args = append([]any{p.lex.File(), p.lineNum}, args...) } fmt.Fprintf(p.errorWriter, format, args...) 
p.errorCount++ @@ -775,7 +775,7 @@ func (p *Parser) registerExtension(a *obj.Addr, name string, prefix rune) { switch p.arch.Family { case sys.ARM64: - err := arch.ARM64RegisterExtension(a, ext, reg, num, isAmount, isIndex) + err := arm64.ARM64RegisterExtension(a, ext, reg, num, isAmount, isIndex) if err != nil { p.errorf("%v", err) } diff --git a/src/cmd/asm/internal/asm/testdata/arm64.s b/src/cmd/asm/internal/asm/testdata/arm64.s index 236f1a66979..773380e9bb6 100644 --- a/src/cmd/asm/internal/asm/testdata/arm64.s +++ b/src/cmd/asm/internal/asm/testdata/arm64.s @@ -400,6 +400,8 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 MOVD $0x11110000, R1 // MOVD $286326784, R1 // 2122a2d2 MOVD $0xaaaa0000aaaa1111, R1 // MOVD $-6149102338357718767, R1 // 212282d24155b5f24155f5f2 MOVD $0x1111ffff1111aaaa, R1 // MOVD $1230045644216969898, R1 // a1aa8a922122a2f22122e2f2 + MOVD $0xaaaaaaaaaaaaaaab, R1 // MOVD $-6148914691236517205, R1 // e1f301b2615595f2 + MOVD $0x0ff019940ff00ff0, R1 // MOVD $1148446028692721648, R1 // e19f0cb28132c3f2 MOVD $0, R1 // e1031faa MOVD $-1, R1 // 01008092 MOVD $0x210000, R0 // MOVD $2162688, R0 // 2004a0d2 @@ -630,6 +632,8 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 FMOVS F1, 0x44332211(R2) // FMOVS F1, 1144201745(R2) FMOVD F1, 0x1007000(R2) // FMOVD F1, 16805888(R2) FMOVD F1, 0x44332211(R2) // FMOVD F1, 1144201745(R2) + FMOVQ F1, 0x1003000(R2) // FMOVQ F1, 16789504(R2) + FMOVQ F1, 0x44332211(R2) // FMOVQ F1, 1144201745(R2) MOVB 0x1000000(R1), R2 // MOVB 16777216(R1), R2 MOVB 0x44332211(R1), R2 // MOVB 1144201745(R1), R2 @@ -643,6 +647,8 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 FMOVS 0x44332211(R1), F2 // FMOVS 1144201745(R1), F2 FMOVD 0x1000000(R1), F2 // FMOVD 16777216(R1), F2 FMOVD 0x44332211(R1), F2 // FMOVD 1144201745(R1), F2 + FMOVQ 0x1000000(R1), F2 // FMOVQ 16777216(R1), F2 + FMOVQ 0x44332211(R1), F2 // FMOVQ 1144201745(R1), F2 // shifted or extended register offset. 
MOVD (R2)(R6.SXTW), R4 // 44c866f8 @@ -1894,4 +1900,12 @@ next: BTI J // 9f2403d5 BTI JC // df2403d5 +// Pointer Authentication Codes (PAC) + PACIASP // 3f2303d5 + AUTIASP // bf2303d5 + PACIBSP // 7f2303d5 + AUTIBSP // ff2303d5 + AUTIA1716 // 9f2103d5 + AUTIB1716 // df2103d5 + END diff --git a/src/cmd/asm/internal/asm/testdata/arm64error.s b/src/cmd/asm/internal/asm/testdata/arm64error.s index 55890ce3e63..ce88e3ca540 100644 --- a/src/cmd/asm/internal/asm/testdata/arm64error.s +++ b/src/cmd/asm/internal/asm/testdata/arm64error.s @@ -422,4 +422,10 @@ TEXT errors(SB),$0 SHA1H V1.B16, V2.B16 // ERROR "invalid operands" BTI // ERROR "missing operand" BTI PLDL1KEEP // ERROR "illegal argument" + PACIASP C // ERROR "illegal combination" + AUTIASP R2 // ERROR "illegal combination" + PACIBSP R0 // ERROR "illegal combination" + AUTIBSP C // ERROR "illegal combination" + AUTIA1716 $45 // ERROR "illegal combination" + AUTIB1716 R0 // ERROR "illegal combination" RET diff --git a/src/cmd/asm/internal/asm/testdata/loong64enc1.s b/src/cmd/asm/internal/asm/testdata/loong64enc1.s index fd86db7a4fc..c820a0a5a10 100644 --- a/src/cmd/asm/internal/asm/testdata/loong64enc1.s +++ b/src/cmd/asm/internal/asm/testdata/loong64enc1.s @@ -93,8 +93,8 @@ lable2: MOVV R4, 1(R5) // a404c029 MOVB R4, 1(R5) // a4040029 MOVBU R4, 1(R5) // a4040029 - SC R4, 1(R5) // a4040021 - SCV R4, 1(R5) // a4040023 + SC R4, 4096(R5) // a4001021 + SCV R4, 4096(R5) // a4001023 MOVW y+8(FP), R4 // 64408028 MOVWU y+8(FP), R4 // 6440802a MOVV y+8(FP), R4 // 6440c028 @@ -105,8 +105,8 @@ lable2: MOVV 1(R5), R4 // a404c028 MOVB 1(R5), R4 // a4040028 MOVBU 1(R5), R4 // a404002a - LL 1(R5), R4 // a4040020 - LLV 1(R5), R4 // a4040022 + LL 4096(R5), R4 // a4001020 + LLV 4096(R5), R4 // a4001022 MOVW $4(R4), R5 // 8510c002 MOVV $4(R4), R5 // 8510c002 MOVW $-1, R4 // 04fcff02 @@ -261,22 +261,18 @@ lable2: MOVV R4, FCC0 // 80d81401 // LDPTR.{W/D} and STPTR.{W/D} instructions - MOVWP R5, -32768(R4) // 85008025 MOVWP R5, 32764(R4) // 85fc7f25 MOVWP R5, 32(R4) // 85200025 MOVWP R5, 4(R4) // 85040025 MOVWP R5, (R4) // 85000025 - MOVVP R5, -32768(R4) // 85008027 MOVVP R5, 32764(R4) // 85fc7f27 MOVVP R5, 32(R4) // 85200027 MOVVP R5, 4(R4) // 85040027 MOVVP R5, (R4) // 85000027 - MOVWP -32768(R5), R4 // a4008024 MOVWP 32764(R5), R4 // a4fc7f24 MOVWP 32(R5), R4 // a4200024 MOVWP 4(R5), R4 // a4040024 MOVWP (R5), R4 // a4000024 - MOVVP -32768(R5), R4 // a4008026 MOVVP 32764(R5), R4 // a4fc7f26 MOVVP 32(R5), R4 // a4200026 MOVVP 4(R5), R4 // a4040026 @@ -537,12 +533,18 @@ lable2: XVMOVQ X28.V[3], X8 // 88ef0377 XVMOVQ X27.V[0], X9 // 69e30377 - //Move vector element to vector. + // Move vector element to vector. VMOVQ V1.B[3], V9.B16 // 298cf772 VMOVQ V2.H[2], V8.H8 // 48c8f772 VMOVQ V3.W[1], V7.W4 // 67e4f772 VMOVQ V4.V[0], V6.V2 // 86f0f772 + // Move vector register to vector register. + VMOVQ V1, V9 // 29002d73 + VMOVQ V2, V8 // 48002d73 + XVMOVQ X3, X7 // 67002d77 + XVMOVQ X4, X6 // 86002d77 + // Load data from memory and broadcast to each element of a vector register: VMOVQ offset(Rj), . 
VMOVQ (R4), V0.B16 // 80008030 VMOVQ 1(R4), V0.B16 // 80048030 @@ -841,6 +843,42 @@ lable2: XVSUBWU $15, X1, X2 // 223c8d76 XVSUBVU $16, X1, X2 // 22c08d76 + // [X]VSADD{B,H,W,V}, [X]VSSUB{B,H,W,V} instructions + VSADDB V1, V2, V3 // 43044670 + VSADDH V1, V2, V3 // 43844670 + VSADDW V1, V2, V3 // 43044770 + VSADDV V1, V2, V3 // 43844770 + VSSUBB V1, V2, V3 // 43044870 + VSSUBH V1, V2, V3 // 43844870 + VSSUBW V1, V2, V3 // 43044970 + VSSUBV V1, V2, V3 // 43844970 + XVSADDB X3, X2, X1 // 410c4674 + XVSADDH X3, X2, X1 // 418c4674 + XVSADDW X3, X2, X1 // 410c4774 + XVSADDV X3, X2, X1 // 418c4774 + XVSSUBB X3, X2, X1 // 410c4874 + XVSSUBH X3, X2, X1 // 418c4874 + XVSSUBW X3, X2, X1 // 410c4974 + XVSSUBV X3, X2, X1 // 418c4974 + + // [X]VSADD{B,H,W,V}U, [X]VSSUB{B,H,W,V}U instructions + VSADDBU V1, V2, V3 // 43044a70 + VSADDHU V1, V2, V3 // 43844a70 + VSADDWU V1, V2, V3 // 43044b70 + VSADDVU V1, V2, V3 // 43844b70 + VSSUBBU V1, V2, V3 // 43044c70 + VSSUBHU V1, V2, V3 // 43844c70 + VSSUBWU V1, V2, V3 // 43044d70 + VSSUBVU V1, V2, V3 // 43844d70 + XVSADDBU X1, X2, X3 // 43044a74 + XVSADDHU X1, X2, X3 // 43044b74 + XVSADDWU X1, X2, X3 // 43044b74 + XVSADDVU X1, X2, X3 // 43844b74 + XVSSUBBU X1, X2, X3 // 43044c74 + XVSSUBHU X1, X2, X3 // 43844c74 + XVSSUBWU X1, X2, X3 // 43044d74 + XVSSUBVU X1, X2, X3 // 43844d74 + // [X]VILV{L/H}{B,H,W,V} instructions VILVLB V1, V2, V3 // 43041a71 VILVLH V1, V2, V3 // 43841a71 @@ -1021,6 +1059,32 @@ lable2: XVSHUF4IV $8, X1, X2 // 22209c77 XVSHUF4IV $15, X1, X2 // 223c9c77 + // [X]VSHUF.{B/H/W/V} instructions + VSHUFH V1, V2, V3 // 43847a71 + VSHUFW V1, V2, V3 // 43047b71 + VSHUFV V1, V2, V3 // 43847b71 + XVSHUFH X1, X2, X3 // 43847a75 + XVSHUFW X1, X2, X3 // 43047b75 + XVSHUFV X1, X2, X3 // 43847b75 + VSHUFB V1, V2, V3, V4 // 6488500d + XVSHUFB X1, X2, X3, X4 // 6488600d + + // VPERMIW, XVPERMI{W,V,Q} instructions + VPERMIW $0x1B, V1, V2 // VPERMIW $27, V1, V2 // 226ce473 + XVPERMIW $0x2B, X1, X2 // XVPERMIW $43, X1, X2 // 22ace477 + XVPERMIV $0x3B, X1, X2 // XVPERMIV $59, X1, X2 // 22ece877 + XVPERMIQ $0x4B, X1, X2 // XVPERMIQ $75, X1, X2 // 222ced77 + + // A{,X}VEXTRINS.{B,H,W,V} instructions + VEXTRINSB $0x18, V1, V2 // VEXTRINSB $24, V1, V2 // 22608c73 + VEXTRINSH $0x27, V1, V2 // VEXTRINSH $39, V1, V2 // 229c8873 + VEXTRINSW $0x36, V1, V2 // VEXTRINSW $54, V1, V2 // 22d88473 + VEXTRINSV $0x45, V1, V2 // VEXTRINSV $69, V1, V2 // 22148173 + XVEXTRINSB $0x54, X1, X2 // XVEXTRINSB $84, X1, X2 // 22508d77 + XVEXTRINSH $0x63, X1, X2 // XVEXTRINSH $99, X1, X2 // 228c8977 + XVEXTRINSW $0x72, X1, X2 // XVEXTRINSW $114, X1, X2 // 22c88577 + XVEXTRINSV $0x81, X1, X2 // XVEXTRINSV $129, X1, X2 // 22048277 + // [X]VSETEQZ.V, [X]VSETNEZ.V VSETEQV V1, FCC0 // 20989c72 VSETNEV V1, FCC0 // 209c9c72 diff --git a/src/cmd/asm/internal/asm/testdata/loong64enc3.s b/src/cmd/asm/internal/asm/testdata/loong64enc3.s index 2d83bd719a5..2dc6529dcb0 100644 --- a/src/cmd/asm/internal/asm/testdata/loong64enc3.s +++ b/src/cmd/asm/internal/asm/testdata/loong64enc3.s @@ -42,8 +42,10 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0 MOVB R4, 4096(R5) // 3e000014de971000c4030029 MOVBU R4, 65536(R5) // 1e020014de971000c4030029 MOVBU R4, 4096(R5) // 3e000014de971000c4030029 - SC R4, 65536(R5) // 1e020014de971000c4030021 - SC R4, 4096(R5) // 3e000014de971000c4030021 + SC R4, 65536(R5) // 1e040010de971000c4030021 + SCV R4, 65536(R5) // 1e040010de971000c4030023 + LL 65536(R5), R4 // 1e040010de971000c4030020 + LLV 65536(R5), R4 // 1e040010de971000c4030022 MOVW y+65540(FP), R4 // 1e020014de8f1000c4338028 MOVWU 
y+65540(FP), R4 // 1e020014de8f1000c433802a MOVV y+65540(FP), R4 // 1e020014de8f1000c433c028 @@ -122,6 +124,21 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0 XOR $4097, R4 // 3e000014de07800384f81500 XOR $4097, R4, R5 // 3e000014de07800385f81500 + MOVWP R5, -32768(R4) // 1efcff13de931000c5038025 + MOVWP R5, 32768(R4) // 1e000010de931000c5038025 + MOVWP R5, 65536(R4) // 1e040010de931000c5030025 + MOVWP R5, 1048576(R4) // 1e400010de931000c5030025 + MOVVP R5, -32768(R4) // 1efcff13de931000c5038027 + MOVVP R5, 65536(R4) // 1e040010de931000c5030027 + MOVVP R5, 1048576(R4) // 1e400010de931000c5030027 + MOVWP -32768(R5), R4 // 1efcff13de971000c4038024 + MOVWP 2229248(R5), R4 // 1e880010de971000c4030424 + MOVWP -2145518592(R5), R4 // 1e740012de971000c403fc24 + MOVVP -32768(R5), R4 // 1efcff13de971000c4038026 + MOVVP 2229248(R5), R4 // 1e880010de971000c4030426 + MOVVP -2145518592(R5), R4 // 1e740012de971000c403fc26 + + // MOVV C_DCON32_12S, r MOVV $0x27312345fffff800, R4 // MOVV $2824077224892692480, R4 // 0400a002a468241684cc0903 MOVV $0xf7312345fffff800, R4 // MOVV $-634687288927848448, R4 // 0400a002a468241684cc3d03 diff --git a/src/cmd/asm/internal/asm/testdata/loong64enc6.s b/src/cmd/asm/internal/asm/testdata/loong64enc6.s new file mode 100644 index 00000000000..bd19ea76012 --- /dev/null +++ b/src/cmd/asm/internal/asm/testdata/loong64enc6.s @@ -0,0 +1,12 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "../../../../../runtime/textflag.h" + +TEXT asmtest(SB),DUPOK|NOSPLIT,$0 + // MOVWP LOREG_64(Rx), Ry + MOVWP 81985529216486896(R4), R5 // 9e571315dec3b703feac6816de4b000384f8100085000025 + MOVWP -81985529216486896(R4), R5 // 7ea8ec14de4388031e539717deb73f0384f8100085000025 + MOVWP R4, 81985529216486896(R5) // 9e571315dec3b703feac6816de4b0003a5f81000a4000025 + MOVWP R4, -81985529216486896(R5) // 7ea8ec14de4388031e539717deb73f03a5f81000a4000025 diff --git a/src/cmd/asm/internal/asm/testdata/loong64error.s b/src/cmd/asm/internal/asm/testdata/loong64error.s index 2dcd34bf61c..1bc0ddea557 100644 --- a/src/cmd/asm/internal/asm/testdata/loong64error.s +++ b/src/cmd/asm/internal/asm/testdata/loong64error.s @@ -7,3 +7,8 @@ TEXT errors(SB),$0 XVSHUF4IV $16, X1, X2 // ERROR "operand out of range 0 to 15" ADDV16 $1, R4, R5 // ERROR "the constant must be a multiple of 65536." ADDV16 $65535, R4, R5 // ERROR "the constant must be a multiple of 65536." + SC R4, 1(R5) // ERROR "offset must be a multiple of 4." + SCV R4, 1(R5) // ERROR "offset must be a multiple of 4." + LL 1(R5), R4 // ERROR "offset must be a multiple of 4." + LLV 1(R5), R4 // ERROR "offset must be a multiple of 4." 
+ diff --git a/src/cmd/asm/internal/asm/testdata/riscv64.s b/src/cmd/asm/internal/asm/testdata/riscv64.s index 702b82223b3..4615119af00 100644 --- a/src/cmd/asm/internal/asm/testdata/riscv64.s +++ b/src/cmd/asm/internal/asm/testdata/riscv64.s @@ -372,6 +372,76 @@ start: // 21.7: Double-Precision Floating-Point Classify Instruction FCLASSD F0, X5 // d31200e2 + // + // "C" Extension for Compressed Instructions, Version 2.0 + // + + // 26.3.1: Compressed Stack-Pointer-Based Loads and Stores + CLWSP 20(SP), X10 // 5245 + CLDSP 24(SP), X10 // 6265 + CFLDSP 32(SP), F10 // 0235 + CSWSP X10, 20(SP) // 2aca + CSDSP X10, 24(SP) // 2aec + CFSDSP F10, 32(SP) // 2ab0 + + // 26.3.2: Compressed Register-Based Loads and Stores + CLW 20(X10), X11 // 4c49 + CLD 24(X10), X11 // 0c6d + CFLD 32(X10), F11 // 0c31 + CSW X11, 20(X10) // 4cc9 + CSD X11, 24(X10) // 0ced + CFSD F11, 32(X10) // 0cb1 + + // 26.4: Compressed Control Transfer Instructions + CJ 1(PC) // 09a0 + CJR X5 // 8282 + CJALR X5 // 8292 + CBEQZ X10, 1(PC) // 09c1 + CBNEZ X10, 1(PC) // 09e1 + + // 26.5.1: Compressed Integer Constant-Generation Instructions + CLI $-32, X5 // 8152 + CLI $31, X5 // fd42 + CLUI $-32, X5 // 8172 + CLUI $31, X5 // fd62 + + // 26.5.2: Compressed Integer Register-Immediate Operations + CADD $-32, X5 // 8112 + CADD $31, X5 // fd02 + CADDI $-32, X5 // 8112 + CADDI $31, X5 // fd02 + CADDW $-32, X5 // 8132 + CADDW $31, X5 // fd22 + CADDIW $-32, X5 // 8132 + CADDIW $31, X5 // fd22 + CADDI16SP $-512, SP // 0171 + CADDI16SP $496, SP // 7d61 + CADDI4SPN $4, SP, X10 // 4800 + CADDI4SPN $1020, SP, X10 // e81f + CSLLI $63, X5 // fe12 + CSRLI $63, X10 // 7d91 + CSRAI $63, X10 // 7d95 + CAND $-32, X10 // 0199 + CAND $31, X10 // 7d89 + CANDI $-32, X10 // 0199 + CANDI $31, X10 // 7d89 + + // 26.5.3: Compressed Integer Register-Register Operations + CMV X6, X5 // 9a82 + CADD X9, X8 // 2694 + CAND X9, X8 // 658c + COR X9, X8 // 458c + CXOR X9, X8 // 258c + CSUB X9, X8 // 058c + CADDW X9, X8 // 259c + CSUBW X9, X8 // 059c + + // 26.5.5: Compressed NOP Instruction + CNOP // 0100 + + // 26.5.6: Compressed Breakpoint Instruction + CEBREAK // 0290 + // 28.4.1: Address Generation Instructions (Zba) ADDUW X10, X11, X12 // 3b86a508 ADDUW X10, X11 // bb85a508 diff --git a/src/cmd/asm/internal/asm/testdata/riscv64validation.s b/src/cmd/asm/internal/asm/testdata/riscv64validation.s index 65497659167..6a2e5f92dee 100644 --- a/src/cmd/asm/internal/asm/testdata/riscv64validation.s +++ b/src/cmd/asm/internal/asm/testdata/riscv64validation.s @@ -12,6 +12,147 @@ TEXT validation(SB),$0 SRLI $1, X5, F1 // ERROR "expected integer register in rd position but got non-integer register F1" SRLI $1, F1, X5 // ERROR "expected integer register in rs1 position but got non-integer register F1" + WORD $-1 // ERROR "must be in range [0x0, 0xffffffff]" + WORD $0x100000000 // ERROR "must be in range [0x0, 0xffffffff]" + + // + // "C" Extension for Compressed Instructions, Version 2.0 + // + CLWSP 20(X5), X10 // ERROR "rs2 must be SP/X2" + CLWSP 20(SP), X0 // ERROR "cannot use register X0" + CLWSP 20(SP), F10 // ERROR "expected integer register in rd position" + CLWSP 22(SP), X10 // ERROR "must be a multiple of 4" + CLDSP 24(X5), X10 // ERROR "rs2 must be SP/X2" + CLDSP 24(SP), X0 // ERROR "cannot use register X0" + CLDSP 24(SP), F10 // ERROR "expected integer register in rd position" + CLDSP 28(SP), X10 // ERROR "must be a multiple of 8" + CFLDSP 32(X5), F10 // ERROR "rs2 must be SP/X2" + CFLDSP 32(SP), X10 // ERROR "expected float register in rd position" + CFLDSP 
36(SP), F10 // ERROR "must be a multiple of 8" + CSWSP X10, 20(X5) // ERROR "rd must be SP/X2" + CSWSP F10, 20(SP) // ERROR "expected integer register in rs2 position" + CSWSP X10, 22(SP) // ERROR "must be a multiple of 4" + CSDSP X10, 24(X5) // ERROR "rd must be SP/X2" + CSDSP F10, 24(SP) // ERROR "expected integer register in rs2 position" + CSDSP X10, 28(SP) // ERROR "must be a multiple of 8" + CFSDSP F10, 32(X5) // ERROR "rd must be SP/X2" + CFSDSP X10, 32(SP) // ERROR "expected float register in rs2 position" + CFSDSP F10, 36(SP) // ERROR "must be a multiple of 8" + CLW 20(X10), F11 // ERROR "expected integer prime register in rd position" + CLW 20(X5), X11 // ERROR "expected integer prime register in rs1 position" + CLW 20(X10), X5 // ERROR "expected integer prime register in rd position" + CLW -1(X10), X11 // ERROR "must be in range [0, 127]" + CLW 22(X10), X11 // ERROR "must be a multiple of 4" + CLW 128(X10), X11 // ERROR "must be in range [0, 127]" + CLD 24(X10), F11 // ERROR "expected integer prime register in rd position" + CLD 24(X5), X11 // ERROR "expected integer prime register in rs1 position" + CLD -1(X10), X11 // ERROR "must be in range [0, 255]" + CLD 30(X10), X11 // ERROR "must be a multiple of 8" + CLD 256(X10), X11 // ERROR "must be in range [0, 255]" + CFLD 32(X10), X11 // ERROR "expected float prime register in rd position" + CFLD 32(X5), F11 // ERROR "expected integer prime register in rs1 position" + CFLD -1(X10), F11 // ERROR "must be in range [0, 255]" + CFLD 34(X10), F11 // ERROR "must be a multiple of 8" + CFLD 256(X10), F11 // ERROR "must be in range [0, 255]" + CSW F11, 20(X10) // ERROR "expected integer prime register in rs2 position" + CSW X11, -1(X10) // ERROR "must be in range [0, 127]" + CSW X11, 22(X10) // ERROR "must be a multiple of 4" + CSW X11, 128(X10) // ERROR "must be in range [0, 127]" + CSD F11, 24(X10) // ERROR "expected integer prime register in rs2 position" + CSD X11, -1(X10) // ERROR "must be in range [0, 255]" + CSD X11, 28(X10) // ERROR "must be a multiple of 8" + CSD X11, 256(X10) // ERROR "must be in range [0, 255]" + CFSD X11, 32(X10) // ERROR "expected float prime register in rs2 position" + CFSD F11, -1(X10) // ERROR "must be in range [0, 255]" + CFSD F11, 36(X10) // ERROR "must be a multiple of 8" + CFSD F11, 256(X10) // ERROR "must be in range [0, 255]" + CJR X0 // ERROR "cannot use register X0 in rs1" + CJR X10, X11 // ERROR "expected no register in rs2" + CJALR X0 // ERROR "cannot use register X0 in rs1" + CJALR X10, X11 // ERROR "expected no register in rd" + CBEQZ X5, 1(PC) // ERROR "expected integer prime register in rs1" + CBNEZ X5, 1(PC) // ERROR "expected integer prime register in rs1" + CLI $3, X0 // ERROR "cannot use register X0 in rd" + CLI $-33, X5 // ERROR "must be in range [-32, 31]" + CLI $32, X5 // ERROR "must be in range [-32, 31]" + CLUI $0, X5 // ERROR "immediate cannot be zero" + CLUI $3, X0 // ERROR "cannot use register X0 in rd" + CLUI $3, X2 // ERROR "cannot use register SP/X2 in rd" + CLUI $-33, X5 // ERROR "must be in range [-32, 31]" + CLUI $32, X5 // ERROR "must be in range [-32, 31]" + CADD $31, X5, X6 // ERROR "rd must be the same as rs1" + CADD $-33, X5 // ERROR "must be in range [-32, 31]" + CADD $32, X5 // ERROR "must be in range [-32, 31]" + CADDI $0, X5 // ERROR "immediate cannot be zero" + CADDI $31, X5, X6 // ERROR "rd must be the same as rs1" + CADDI $-33, X5 // ERROR "must be in range [-32, 31]" + CADDI $32, X5 // ERROR "must be in range [-32, 31]" + CADDW $-33, X5 // ERROR "must be in range 
[-32, 31]" + CADDW $32, X5 // ERROR "must be in range [-32, 31]" + CADDIW $-33, X5 // ERROR "must be in range [-32, 31]" + CADDIW $32, X5 // ERROR "must be in range [-32, 31]" + CADDI16SP $0, SP // ERROR "immediate cannot be zero" + CADDI16SP $16, X5 // ERROR "rd must be SP/X2" + CADDI16SP $-513, SP // ERROR "must be in range [-512, 511]" + CADDI16SP $20, SP // ERROR "must be a multiple of 16" + CADDI16SP $512, SP // ERROR "must be in range [-512, 511]" + CADDI4SPN $4, SP, X5 // ERROR "expected integer prime register in rd" + CADDI4SPN $4, X5, X10 // ERROR "SP/X2 must be in rs1" + CADDI4SPN $-1, SP, X10 // ERROR "must be in range [0, 1023]" + CADDI4SPN $0, SP, X10 // ERROR "immediate cannot be zero" + CADDI4SPN $6, SP, X10 // ERROR "must be a multiple of 4" + CADDI4SPN $1024, SP, X10 // ERROR "must be in range [0, 1023]" + CSLLI $63, X5, X6 // ERROR "rd must be the same as rs1" + CSLLI $-1, X5 // ERROR "must be in range [0, 63]" + CSLLI $0, X5 // ERROR "immediate cannot be zero" + CSLLI $64, X5 // ERROR "must be in range [0, 63]" + CSRLI $63, X10, X11 // ERROR "rd must be the same as rs1" + CSRLI $63, X5 // ERROR "expected integer prime register in rd" + CSRLI $-1, X10 // ERROR "must be in range [0, 63]" + CSRLI $0, X10 // ERROR "immediate cannot be zero" + CSRLI $64, X10 // ERROR "must be in range [0, 63]" + CSRAI $63, X10, X11 // ERROR "rd must be the same as rs1" + CSRAI $63, X5 // ERROR "expected integer prime register in rd" + CSRAI $-1, X10 // ERROR "must be in range [0, 63]" + CSRAI $0, X10 // ERROR "immediate cannot be zero" + CSRAI $64, X10 // ERROR "must be in range [0, 63]" + CAND $1, X10, X11 // ERROR "rd must be the same as rs1" + CAND $1, X5 // ERROR "expected integer prime register in rd" + CAND $-64, X10 // ERROR "must be in range [-32, 31]" + CAND $63, X10 // ERROR "must be in range [-32, 31]" + CANDI $1, X10, X11 // ERROR "rd must be the same as rs1" + CANDI $1, X5 // ERROR "expected integer prime register in rd" + CANDI $-64, X10 // ERROR "must be in range [-32, 31]" + CANDI $63, X10 // ERROR "must be in range [-32, 31]" + CMV X0, X5 // ERROR "cannot use register X0 in rs2" + CMV X5, X6, X7 // ERROR "expected no register in rs1" + CMV X5, X0 // ERROR "cannot use register X0 in rd" + CMV F1, X5 // ERROR "expected integer register in rs2" + CMV X5, F1 // ERROR "expected integer register in rd" + CADD X5, X6, X7 // ERROR "rd must be the same as rs1" + CADD X0, X8 // ERROR "cannot use register X0 in rs2" + CADD X8, X0 // ERROR "cannot use register X0 in rd" + CAND X10, X11, X12 // ERROR "rd must be the same as rs1" + CAND X5, X11 // ERROR "expected integer prime register in rs2" + CAND X10, X5 // ERROR "expected integer prime register in rd" + COR X10, X11, X12 // ERROR "rd must be the same as rs1" + COR X5, X11 // ERROR "expected integer prime register in rs2" + COR X10, X5 // ERROR "expected integer prime register in rd" + CXOR X10, X11, X12 // ERROR "rd must be the same as rs1" + CXOR X5, X11 // ERROR "expected integer prime register in rs2" + CXOR X10, X5 // ERROR "expected integer prime register in rd" + CSUB X10, X11, X12 // ERROR "rd must be the same as rs1" + CSUB X5, X11 // ERROR "expected integer prime register in rs2" + CSUB X10, X5 // ERROR "expected integer prime register in rd" + CADDW X10, X11, X12 // ERROR "rd must be the same as rs1" + CADDW X5, X11 // ERROR "expected integer prime register in rs2" + CADDW X10, X5 // ERROR "expected integer prime register in rd" + CSUBW X10, X11, X12 // ERROR "rd must be the same as rs1" + CSUBW X5, X11 // ERROR "expected 
integer prime register in rs2" + CSUBW X10, X5 // ERROR "expected integer prime register in rd" + CNOP X10 // ERROR "expected no register in rs2" + CEBREAK X10 // ERROR "expected no register in rs2" + // // "V" Standard Extension for Vector Operations, Version 1.0 // diff --git a/src/cmd/asm/internal/lex/input.go b/src/cmd/asm/internal/lex/input.go index 789e229a779..342ac5ac483 100644 --- a/src/cmd/asm/internal/lex/input.go +++ b/src/cmd/asm/internal/lex/input.go @@ -68,7 +68,7 @@ func predefine(defines flags.MultiFlag) map[string]*Macro { var panicOnError bool // For testing. -func (in *Input) Error(args ...interface{}) { +func (in *Input) Error(args ...any) { if panicOnError { panic(fmt.Errorf("%s:%d: %s", in.File(), in.Line(), fmt.Sprintln(args...))) } @@ -77,7 +77,7 @@ func (in *Input) Error(args ...interface{}) { } // expectText is like Error but adds "got XXX" where XXX is a quoted representation of the most recent token. -func (in *Input) expectText(args ...interface{}) { +func (in *Input) expectText(args ...any) { in.Error(append(args, "; got", strconv.Quote(in.Stack.Text()))...) } diff --git a/src/cmd/asm/main.go b/src/cmd/asm/main.go index 2a9ebe9b3e2..f2697db5169 100644 --- a/src/cmd/asm/main.go +++ b/src/cmd/asm/main.go @@ -58,7 +58,7 @@ func main() { // nothing case "index": // known to compiler; ignore here so people can use - // the same list with -gcflags=-spectre=LIST and -asmflags=-spectrre=LIST + // the same list with -gcflags=-spectre=LIST and -asmflags=-spectre=LIST case "all", "ret": ctxt.Retpoline = true } @@ -93,7 +93,7 @@ func main() { for _, f := range flag.Args() { lexer := lex.NewLexer(f) parser := asm.NewParser(ctxt, architecture, lexer) - ctxt.DiagFunc = func(format string, args ...interface{}) { + ctxt.DiagFunc = func(format string, args ...any) { diag = true log.Printf(format, args...) } diff --git a/src/cmd/cgo/ast.go b/src/cmd/cgo/ast.go index 861479db7ac..2da6ca5a30f 100644 --- a/src/cmd/cgo/ast.go +++ b/src/cmd/cgo/ast.go @@ -199,7 +199,7 @@ func commentText(g *ast.CommentGroup) string { return strings.Join(pieces, "") } -func (f *File) validateIdents(x interface{}, context astContext) { +func (f *File) validateIdents(x any, context astContext) { if x, ok := x.(*ast.Ident); ok { if f.isMangledName(x.Name) { error_(x.Pos(), "identifier %q may conflict with identifiers generated by cgo", x.Name) @@ -208,7 +208,7 @@ func (f *File) validateIdents(x interface{}, context astContext) { } // Save various references we are going to need later. -func (f *File) saveExprs(x interface{}, context astContext) { +func (f *File) saveExprs(x any, context astContext) { switch x := x.(type) { case *ast.Expr: switch (*x).(type) { @@ -278,7 +278,7 @@ func (f *File) saveCall(call *ast.CallExpr, context astContext) { } // If a function should be exported add it to ExpFunc. -func (f *File) saveExport(x interface{}, context astContext) { +func (f *File) saveExport(x any, context astContext) { n, ok := x.(*ast.FuncDecl) if !ok { return @@ -318,7 +318,7 @@ func (f *File) saveExport(x interface{}, context astContext) { } // Make f.ExpFunc[i] point at the Func from this AST instead of the other one. -func (f *File) saveExport2(x interface{}, context astContext) { +func (f *File) saveExport2(x any, context astContext) { n, ok := x.(*ast.FuncDecl) if !ok { return @@ -355,7 +355,7 @@ const ( ) // walk walks the AST x, calling visit(f, x, context) for each node. 
-func (f *File) walk(x interface{}, context astContext, visit func(*File, interface{}, astContext)) { +func (f *File) walk(x any, context astContext, visit func(*File, any, astContext)) { visit(f, x, context) switch n := x.(type) { case *ast.Expr: @@ -363,7 +363,8 @@ func (f *File) walk(x interface{}, context astContext, visit func(*File, interfa // everything else just recurs default: - f.walkUnexpected(x, context, visit) + error_(token.NoPos, "unexpected type %T in walk", x) + panic("unexpected type") case nil: @@ -396,6 +397,9 @@ func (f *File) walk(x interface{}, context astContext, visit func(*File, interfa case *ast.IndexExpr: f.walk(&n.X, ctxExpr, visit) f.walk(&n.Index, ctxExpr, visit) + case *ast.IndexListExpr: + f.walk(&n.X, ctxExpr, visit) + f.walk(n.Indices, ctxExpr, visit) case *ast.SliceExpr: f.walk(&n.X, ctxExpr, visit) if n.Low != nil { @@ -434,8 +438,8 @@ func (f *File) walk(x interface{}, context astContext, visit func(*File, interfa case *ast.StructType: f.walk(n.Fields, ctxField, visit) case *ast.FuncType: - if tparams := funcTypeTypeParams(n); tparams != nil { - f.walk(tparams, ctxParam, visit) + if n.TypeParams != nil { + f.walk(n.TypeParams, ctxParam, visit) } f.walk(n.Params, ctxParam, visit) if n.Results != nil { @@ -524,8 +528,8 @@ func (f *File) walk(x interface{}, context astContext, visit func(*File, interfa f.walk(n.Values, ctxExpr, visit) } case *ast.TypeSpec: - if tparams := typeSpecTypeParams(n); tparams != nil { - f.walk(tparams, ctxParam, visit) + if n.TypeParams != nil { + f.walk(n.TypeParams, ctxParam, visit) } f.walk(&n.Type, ctxType, visit) diff --git a/src/cmd/cgo/ast_go1.go b/src/cmd/cgo/ast_go1.go deleted file mode 100644 index 2f65f0f7183..00000000000 --- a/src/cmd/cgo/ast_go1.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build compiler_bootstrap - -package main - -import ( - "go/ast" - "go/token" -) - -func (f *File) walkUnexpected(x interface{}, context astContext, visit func(*File, interface{}, astContext)) { - error_(token.NoPos, "unexpected type %T in walk", x) - panic("unexpected type") -} - -func funcTypeTypeParams(n *ast.FuncType) *ast.FieldList { - return nil -} - -func typeSpecTypeParams(n *ast.TypeSpec) *ast.FieldList { - return nil -} diff --git a/src/cmd/cgo/ast_go118.go b/src/cmd/cgo/ast_go118.go deleted file mode 100644 index ced30728dc9..00000000000 --- a/src/cmd/cgo/ast_go118.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !compiler_bootstrap - -package main - -import ( - "go/ast" - "go/token" -) - -func (f *File) walkUnexpected(x interface{}, context astContext, visit func(*File, interface{}, astContext)) { - switch n := x.(type) { - default: - error_(token.NoPos, "unexpected type %T in walk", x) - panic("unexpected type") - - case *ast.IndexListExpr: - f.walk(&n.X, ctxExpr, visit) - f.walk(n.Indices, ctxExpr, visit) - } -} - -func funcTypeTypeParams(n *ast.FuncType) *ast.FieldList { - return n.TypeParams -} - -func typeSpecTypeParams(n *ast.TypeSpec) *ast.FieldList { - return n.TypeParams -} diff --git a/src/cmd/cgo/doc.go b/src/cmd/cgo/doc.go index ef5272299bb..7e8486874ef 100644 --- a/src/cmd/cgo/doc.go +++ b/src/cmd/cgo/doc.go @@ -127,7 +127,7 @@ environment variable when running the go tool: set it to 1 to enable the use of cgo, and to 0 to disable it. The go tool will set the build constraint "cgo" if cgo is enabled. The special import "C" implies the "cgo" build constraint, as though the file also said -"//go:build cgo". Therefore, if cgo is disabled, files that import +"//go:build cgo". Therefore, if cgo is disabled, files that import "C" will not be built by the go tool. (For more about build constraints see https://golang.org/pkg/go/build/#hdr-Build_Constraints). diff --git a/src/cmd/cgo/gcc.go b/src/cmd/cgo/gcc.go index 6c1695bdb0a..d3de3906b48 100644 --- a/src/cmd/cgo/gcc.go +++ b/src/cmd/cgo/gcc.go @@ -1056,7 +1056,7 @@ func (p *Package) rewriteCall(f *File, call *Call) (string, bool) { func (p *Package) needsPointerCheck(f *File, t ast.Expr, arg ast.Expr) bool { // An untyped nil does not need a pointer check, and when // _cgoCheckPointer returns the untyped nil the type assertion we - // are going to insert will fail. Easier to just skip nil arguments. + // are going to insert will fail. Easier to just skip nil arguments. // TODO: Note that this fails if nil is shadowed. if id, ok := arg.(*ast.Ident); ok && id.Name == "nil" { return false @@ -1158,7 +1158,7 @@ func (p *Package) hasPointer(f *File, t ast.Expr, top bool) bool { // If addPosition is true, add position info to the idents of C names in arg. 
func (p *Package) mangle(f *File, arg *ast.Expr, addPosition bool) (ast.Expr, bool) { needsUnsafe := false - f.walk(arg, ctxExpr, func(f *File, arg interface{}, context astContext) { + f.walk(arg, ctxExpr, func(f *File, arg any, context astContext) { px, ok := arg.(*ast.Expr) if !ok { return @@ -2154,7 +2154,7 @@ func (p *Package) gccDebug(stdin []byte, nnames int) (d *dwarf.Data, ints []int6 for _, s := range f.Symbols { switch { case isDebugInts(s.Name): - if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) { + if i := s.SectionNumber - 1; 0 <= i && i < len(f.Sections) { sect := f.Sections[i] if s.Value < sect.Size { if sdat, err := sect.Data(); err == nil { @@ -2167,7 +2167,7 @@ func (p *Package) gccDebug(stdin []byte, nnames int) (d *dwarf.Data, ints []int6 } } case isDebugFloats(s.Name): - if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) { + if i := s.SectionNumber - 1; 0 <= i && i < len(f.Sections) { sect := f.Sections[i] if s.Value < sect.Size { if sdat, err := sect.Data(); err == nil { @@ -2181,7 +2181,7 @@ func (p *Package) gccDebug(stdin []byte, nnames int) (d *dwarf.Data, ints []int6 } default: if n := indexOfDebugStr(s.Name); n != -1 { - if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) { + if i := s.SectionNumber - 1; 0 <= i && i < len(f.Sections) { sect := f.Sections[i] if s.Value < sect.Size { if sdat, err := sect.Data(); err == nil { @@ -2193,7 +2193,7 @@ func (p *Package) gccDebug(stdin []byte, nnames int) (d *dwarf.Data, ints []int6 break } if n := indexOfDebugStrlen(s.Name); n != -1 { - if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) { + if i := s.SectionNumber - 1; 0 <= i && i < len(f.Sections) { sect := f.Sections[i] if s.Value < sect.Size { if sdat, err := sect.Data(); err == nil { @@ -2439,7 +2439,7 @@ func (tr *TypeRepr) Empty() bool { // Set modifies the type representation. // If fargs are provided, repr is used as a format for fmt.Sprintf. // Otherwise, repr is used unprocessed as the type representation. -func (tr *TypeRepr) Set(repr string, fargs ...interface{}) { +func (tr *TypeRepr) Set(repr string, fargs ...any) { tr.Repr = repr tr.FormatArgs = fargs } @@ -2713,7 +2713,7 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ // so execute the basic things that the struct case would do // other than try to determine a Go representation. tt := *t - tt.C = &TypeRepr{"%s %s", []interface{}{dt.Kind, tag}} + tt.C = &TypeRepr{"%s %s", []any{dt.Kind, tag}} // We don't know what the representation of this struct is, so don't let // anyone allocate one on the Go side. As a side effect of this annotation, // pointers to this type will not be considered pointers in Go. They won't @@ -2743,7 +2743,7 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ t.Align = align tt := *t if tag != "" { - tt.C = &TypeRepr{"struct %s", []interface{}{tag}} + tt.C = &TypeRepr{"struct %s", []any{tag}} } tt.Go = g if c.incompleteStructs[tag] { @@ -3010,7 +3010,7 @@ func (c *typeConv) FuncType(dtype *dwarf.FuncType, pos token.Pos) *FuncType { for i, f := range dtype.ParamType { // gcc's DWARF generator outputs a single DotDotDotType parameter for // function pointers that specify no parameters (e.g. void - // (*__cgo_0)()). Treat this special case as void. This case is + // (*__cgo_0)()). Treat this special case as void. This case is // invalid according to ISO C anyway (i.e. void (*__cgo_1)(...) is not // legal). 
if _, ok := f.(*dwarf.DotDotDotType); ok && i == 0 { @@ -3081,7 +3081,7 @@ func (c *typeConv) Struct(dt *dwarf.StructType, pos token.Pos) (expr *ast.Struct off := int64(0) // Rename struct fields that happen to be named Go keywords into - // _{keyword}. Create a map from C ident -> Go ident. The Go ident will + // _{keyword}. Create a map from C ident -> Go ident. The Go ident will // be mangled. Any existing identifier that already has the same name on // the C-side will cause the Go-mangled version to be prefixed with _. // (e.g. in a struct with fields '_type' and 'type', the latter would be @@ -3309,7 +3309,7 @@ func godefsFields(fld []*ast.Field) { // fieldPrefix returns the prefix that should be removed from all the // field names when generating the C or Go code. For generated // C, we leave the names as is (tv_sec, tv_usec), since that's what -// people are used to seeing in C. For generated Go code, such as +// people are used to seeing in C. For generated Go code, such as // package syscall's data structures, we drop a common prefix // (so sec, usec, which will get turned into Sec, Usec for exporting). func fieldPrefix(fld []*ast.Field) string { @@ -3456,7 +3456,7 @@ func (c *typeConv) badCFType(dt *dwarf.TypedefType) bool { // Tagged pointer support // Low-bit set means tagged object, next 3 bits (currently) // define the tagged object class, next 4 bits are for type -// information for the specific tagged object class. Thus, +// information for the specific tagged object class. Thus, // the low byte is for type info, and the rest of a pointer // (32 or 64-bit) is for payload, whatever the tagged class. // diff --git a/src/cmd/cgo/godefs.go b/src/cmd/cgo/godefs.go index 9cf626c1732..93f90271571 100644 --- a/src/cmd/cgo/godefs.go +++ b/src/cmd/cgo/godefs.go @@ -117,7 +117,7 @@ func (p *Package) godefs(f *File, args []string) string { var gofmtBuf strings.Builder // gofmt returns the gofmt-formatted string for an AST node. -func gofmt(n interface{}) string { +func gofmt(n any) string { gofmtBuf.Reset() err := printer.Fprint(&gofmtBuf, fset, n) if err != nil { diff --git a/src/cmd/cgo/internal/swig/swig_test.go b/src/cmd/cgo/internal/swig/swig_test.go index 603dab4917c..9d5ea0051ac 100644 --- a/src/cmd/cgo/internal/swig/swig_test.go +++ b/src/cmd/cgo/internal/swig/swig_test.go @@ -80,7 +80,7 @@ func mustHaveCxx(t *testing.T) { if len(args) == 0 { t.Skip("no C++ compiler") } - testenv.MustHaveExecPath(t, string(args[0])) + testenv.MustHaveExecPath(t, args[0]) } var ( diff --git a/src/cmd/cgo/internal/test/buildid_linux.go b/src/cmd/cgo/internal/test/buildid_linux.go index 84d3edb664e..7e0fd0fd126 100644 --- a/src/cmd/cgo/internal/test/buildid_linux.go +++ b/src/cmd/cgo/internal/test/buildid_linux.go @@ -4,9 +4,9 @@ package cgotest -// Test that we have no more than one build ID. In the past we used +// Test that we have no more than one build ID. In the past we used // to generate a separate build ID for each package using cgo, and the -// linker concatenated them all. We don't want that--we only want +// linker concatenated them all. We don't want that--we only want // one. import ( @@ -42,7 +42,7 @@ sections: for len(d) > 0 { // ELF standards differ as to the sizes in - // note sections. Both the GNU linker and + // note sections. Both the GNU linker and // gold always generate 32-bit sizes, so that // is what we assume here. 
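For context on the buildid_linux.go comment above, here is a minimal sketch of the 32-bit note-header layout it assumes (illustrative only; the real test walks the .note sections of the built binary):

```
package main

import (
	"encoding/binary"
	"fmt"
)

// An ELF note record begins with three size/type words; per the test's
// comment, both the GNU linker and gold emit them as 32-bit values.
func main() {
	// Fabricated header: namesz=4, descsz=8, type=3 (NT_GNU_BUILD_ID).
	d := []byte{4, 0, 0, 0, 8, 0, 0, 0, 3, 0, 0, 0}
	namesz := binary.LittleEndian.Uint32(d[0:4])
	descsz := binary.LittleEndian.Uint32(d[4:8])
	typ := binary.LittleEndian.Uint32(d[8:12])
	fmt.Println(namesz, descsz, typ) // 4 8 3
}
```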
diff --git a/src/cmd/cgo/internal/test/callback.go b/src/cmd/cgo/internal/test/callback.go index 478bf8294af..8f8dd8fded6 100644 --- a/src/cmd/cgo/internal/test/callback.go +++ b/src/cmd/cgo/internal/test/callback.go @@ -40,7 +40,7 @@ func nestedCall(f func()) { callbackMutex.Unlock() // Pass the address of i because the C function was written to - // take a pointer. We could pass an int if we felt like + // take a pointer. We could pass an int if we felt like // rewriting the C code. C.callback(unsafe.Pointer(&i)) diff --git a/src/cmd/cgo/internal/test/cgo_darwin_test.go b/src/cmd/cgo/internal/test/cgo_darwin_test.go new file mode 100644 index 00000000000..5d9d1640021 --- /dev/null +++ b/src/cmd/cgo/internal/test/cgo_darwin_test.go @@ -0,0 +1,11 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cgo && darwin + +package cgotest + +import "testing" + +func TestIssue76023(t *testing.T) { issue76023(t) } diff --git a/src/cmd/cgo/internal/test/gcc68255/a.go b/src/cmd/cgo/internal/test/gcc68255/a.go index e106dee3ec0..cc4804b90bd 100644 --- a/src/cmd/cgo/internal/test/gcc68255/a.go +++ b/src/cmd/cgo/internal/test/gcc68255/a.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Test that it's OK to have C code that does nothing other than -// initialize a global variable. This used to fail with gccgo. +// initialize a global variable. This used to fail with gccgo. package gcc68255 diff --git a/src/cmd/cgo/internal/test/issue76023.go b/src/cmd/cgo/internal/test/issue76023.go new file mode 100644 index 00000000000..7fe8ae53f7e --- /dev/null +++ b/src/cmd/cgo/internal/test/issue76023.go @@ -0,0 +1,27 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build darwin + +package cgotest + +/* +#cgo LDFLAGS: -Wl,-undefined,dynamic_lookup + +extern void __gotest_cgo_null_api(void) __attribute__((weak_import)); + +int issue76023(void) { + if (__gotest_cgo_null_api) return 1; + return 0; +} +*/ +import "C" +import "testing" + +func issue76023(t *testing.T) { + r := C.issue76023() + if r != 0 { + t.Error("found __gotest_cgo_null_api") + } +} diff --git a/src/cmd/cgo/internal/testcarchive/carchive_test.go b/src/cmd/cgo/internal/testcarchive/carchive_test.go index c0ad79f2314..7c6e2b1d848 100644 --- a/src/cmd/cgo/internal/testcarchive/carchive_test.go +++ b/src/cmd/cgo/internal/testcarchive/carchive_test.go @@ -102,14 +102,14 @@ func testMain(m *testing.M) int { bin = cmdToRun("./testp") ccOut := goEnv("CC") - cc = []string{string(ccOut)} + cc = []string{ccOut} out := goEnv("GOGCCFLAGS") quote := '\000' start := 0 lastSpace := true backslash := false - s := string(out) + s := out for i, c := range s { if quote == '\000' && unicode.IsSpace(c) { if !lastSpace { diff --git a/src/cmd/cgo/internal/testcshared/cshared_test.go b/src/cmd/cgo/internal/testcshared/cshared_test.go index 2ce705adba4..c01f5cf2cfb 100644 --- a/src/cmd/cgo/internal/testcshared/cshared_test.go +++ b/src/cmd/cgo/internal/testcshared/cshared_test.go @@ -76,7 +76,7 @@ func testMain(m *testing.M) int { start := 0 lastSpace := true backslash := false - s := string(out) + s := out for i, c := range s { if quote == '\000' && unicode.IsSpace(c) { if !lastSpace { diff --git a/src/cmd/cgo/internal/testerrors/badsym_test.go b/src/cmd/cgo/internal/testerrors/badsym_test.go index 4fd5c44505a..756ffdb1fc7 100644 --- a/src/cmd/cgo/internal/testerrors/badsym_test.go +++ b/src/cmd/cgo/internal/testerrors/badsym_test.go @@ -186,7 +186,7 @@ func cCompilerCmd(t *testing.T) []string { start := 0 lastSpace := true backslash := false - s := string(out) + s := out for i, c := range s { if quote == '\000' && unicode.IsSpace(c) { if !lastSpace { diff --git a/src/cmd/cgo/internal/testerrors/ptr_test.go b/src/cmd/cgo/internal/testerrors/ptr_test.go index beba0d26ac1..bc1cc1c6e08 100644 --- a/src/cmd/cgo/internal/testerrors/ptr_test.go +++ b/src/cmd/cgo/internal/testerrors/ptr_test.go @@ -14,6 +14,7 @@ import ( "os" "os/exec" "path/filepath" + "regexp" "slices" "strings" "sync/atomic" @@ -24,15 +25,16 @@ var tmp = flag.String("tmp", "", "use `dir` for temporary files and do not clean // ptrTest is the tests without the boilerplate. 
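The GOGCCFLAGS-splitting loops touched in carchive_test.go, cshared_test.go, and badsym_test.go all share the same shape. A condensed, runnable sketch of the idea (it handles only whitespace and quotes; the originals also fold backslash escapes):

```
package main

import (
	"fmt"
	"unicode"
)

// splitFlags splits a shell-style flags string on unquoted whitespace,
// treating single- and double-quoted runs as single fields.
func splitFlags(s string) []string {
	var out []string
	quote := rune(0)
	var field []rune
	flush := func() {
		if len(field) > 0 {
			out = append(out, string(field))
			field = field[:0]
		}
	}
	for _, c := range s {
		switch {
		case quote == 0 && unicode.IsSpace(c):
			flush()
		case quote == 0 && (c == '\'' || c == '"'):
			quote = c
		case c == quote:
			quote = 0
		default:
			field = append(field, c)
		}
	}
	flush()
	return out
}

func main() {
	fmt.Println(splitFlags(`-g -O2 -D 'A B'`)) // [-g -O2 -D A B]
}
```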
type ptrTest struct { - name string // for reporting - c string // the cgo comment - c1 string // cgo comment forced into non-export cgo file - imports []string // a list of imports - support string // supporting functions - body string // the body of the main function - extra []extra // extra files - fail bool // whether the test should fail - expensive bool // whether the test requires the expensive check + name string // for reporting + c string // the cgo comment + c1 string // cgo comment forced into non-export cgo file + imports []string // a list of imports + support string // supporting functions + body string // the body of the main function + extra []extra // extra files + fail bool // whether the test should fail + expensive bool // whether the test requires the expensive check + errTextRegexp string // error text regexp; if empty, use the pattern `.*unpinned Go.*` } type extra struct { @@ -489,6 +491,27 @@ var ptrTests = []ptrTest{ body: `i := 0; a := &[2]unsafe.Pointer{nil, unsafe.Pointer(&i)}; C.f45(&a[0])`, fail: true, }, + { + // Passing a Go map as argument to C. + name: "argmap", + c: `void f46(void* p) {}`, + imports: []string{"unsafe"}, + body: `m := map[int]int{0: 1,}; C.f46(unsafe.Pointer(&m))`, + fail: true, + errTextRegexp: `.*argument of cgo function has Go pointer to unpinned Go map`, + }, + { + // Returning a Go map to C. + name: "retmap", + c: `extern void f47();`, + support: `//export GoMap47 + func GoMap47() map[int]int { return map[int]int{0: 1,} }`, + body: `C.f47()`, + c1: `extern void* GoMap47(); + void f47() { GoMap47(); }`, + fail: true, + errTextRegexp: `.*result of Go function GoMap47 called from cgo is unpinned Go map or points to unpinned Go map.*`, + }, } func TestPointerChecks(t *testing.T) { @@ -519,7 +542,6 @@ func TestPointerChecks(t *testing.T) { // after testOne finishes. var pending int32 for _, pt := range ptrTests { - pt := pt t.Run(pt.name, func(t *testing.T) { atomic.AddInt32(&pending, +1) defer func() { @@ -690,11 +712,17 @@ func testOne(t *testing.T, pt ptrTest, exe, exe2 string) { } buf, err := runcmd(cgocheck) + + var pattern string = pt.errTextRegexp + if pt.errTextRegexp == "" { + pattern = `.*unpinned Go.*` + } + if pt.fail { if err == nil { t.Logf("%s", buf) t.Fatalf("did not fail as expected") - } else if !bytes.Contains(buf, []byte("Go pointer")) { + } else if ok, _ := regexp.Match(pattern, buf); !ok { t.Logf("%s", buf) t.Fatalf("did not print expected error (failed with %v)", err) } diff --git a/src/cmd/cgo/internal/testout/out_test.go b/src/cmd/cgo/internal/testout/out_test.go index 81dfa365871..e8ea5092a35 100644 --- a/src/cmd/cgo/internal/testout/out_test.go +++ b/src/cmd/cgo/internal/testout/out_test.go @@ -8,8 +8,8 @@ import ( "bufio" "bytes" "fmt" - "internal/testenv" "internal/goarch" + "internal/testenv" "os" "path/filepath" "regexp" diff --git a/src/cmd/cgo/internal/testplugin/plugin_test.go b/src/cmd/cgo/internal/testplugin/plugin_test.go index 2afb542ec4f..3216073edbc 100644 --- a/src/cmd/cgo/internal/testplugin/plugin_test.go +++ b/src/cmd/cgo/internal/testplugin/plugin_test.go @@ -37,7 +37,7 @@ func TestMain(m *testing.M) { var tmpDir string // prettyPrintf prints lines with tmpDir sanitized. -func prettyPrintf(format string, args ...interface{}) { +func prettyPrintf(format string, args ...any) { s := fmt.Sprintf(format, args...) 
if tmpDir != "" { s = strings.ReplaceAll(s, tmpDir, "$TMPDIR") diff --git a/src/cmd/cgo/internal/teststdio/testdata/fib.go b/src/cmd/cgo/internal/teststdio/testdata/fib.go index 96173683353..69147880c20 100644 --- a/src/cmd/cgo/internal/teststdio/testdata/fib.go +++ b/src/cmd/cgo/internal/teststdio/testdata/fib.go @@ -5,7 +5,7 @@ //go:build test_run // Compute Fibonacci numbers with two goroutines -// that pass integers back and forth. No actual +// that pass integers back and forth. No actual // concurrency, just threads and synchronization // and foreign code on multiple pthreads. diff --git a/src/cmd/cgo/main.go b/src/cmd/cgo/main.go index 5e08427daf9..ba8e52a6e02 100644 --- a/src/cmd/cgo/main.go +++ b/src/cmd/cgo/main.go @@ -72,8 +72,8 @@ type File struct { ExpFunc []*ExpFunc // exported functions for this file Name map[string]*Name // map from Go name to Name NamePos map[*Name]token.Pos // map from Name to position of the first reference - NoCallbacks map[string]bool // C function names that with #cgo nocallback directive - NoEscapes map[string]bool // C function names that with #cgo noescape directive + NoCallbacks map[string]bool // C function names with #cgo nocallback directive + NoEscapes map[string]bool // C function names with #cgo noescape directive Edit *edit.Buffer debugs []*debug // debug data from iterations of gccDebug. Initialized by File.loadDebug. @@ -148,7 +148,7 @@ type ExpFunc struct { // A TypeRepr contains the string representation of a type. type TypeRepr struct { Repr string - FormatArgs []interface{} + FormatArgs []any } // A Type collects information about a type in both the C and Go worlds. diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go index a2bcdf89c5a..701a8530ffc 100644 --- a/src/cmd/cgo/out.go +++ b/src/cmd/cgo/out.go @@ -649,13 +649,15 @@ func (p *Package) writeDefsFunc(fgo2 io.Writer, n *Name, callsMalloc *bool) { if p.noEscapes[n.C] && p.noCallbacks[n.C] { touchFunc = "_Cgo_keepalive" } - fmt.Fprintf(fgo2, "\tif _Cgo_always_false {\n") - if d.Type.Params != nil { + + if len(paramnames) > 0 { + fmt.Fprintf(fgo2, "\tif _Cgo_always_false {\n") for _, name := range paramnames { fmt.Fprintf(fgo2, "\t\t%s(%s)\n", touchFunc, name) } + fmt.Fprintf(fgo2, "\t}\n") } - fmt.Fprintf(fgo2, "\t}\n") + fmt.Fprintf(fgo2, "\treturn\n") fmt.Fprintf(fgo2, "}\n") } @@ -951,7 +953,7 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) { npad := 0 // the align is at least 1 (for char) maxAlign := int64(1) - argField := func(typ ast.Expr, namePat string, args ...interface{}) { + argField := func(typ ast.Expr, namePat string, args ...any) { name := fmt.Sprintf(namePat, args...) t := p.cgoType(typ) if off%t.Align != 0 { @@ -1144,6 +1146,10 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) { if !p.hasPointer(nil, atype, false) { return } + + // Use the export'ed file/line in error messages. + pos := fset.Position(exp.Func.Pos()) + fmt.Fprintf(fgo2, "//line %s:%d\n", pos.Filename, pos.Line) fmt.Fprintf(fgo2, "\t_cgoCheckResult(a.r%d)\n", i) }) } @@ -1406,7 +1412,7 @@ func forFieldList(fl *ast.FieldList, fn func(int, string, ast.Expr)) { } } -func c(repr string, args ...interface{}) *TypeRepr { +func c(repr string, args ...any) *TypeRepr { return &TypeRepr{repr, args} } diff --git a/src/cmd/cgo/util.go b/src/cmd/cgo/util.go index 23b4a414db7..e83634ffb22 100644 --- a/src/cmd/cgo/util.go +++ b/src/cmd/cgo/util.go @@ -75,7 +75,7 @@ func lineno(pos token.Pos) string { } // Die with an error message. 
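On the main.go comment fixes above: the NoCallbacks and NoEscapes maps record the documented #cgo nocallback and #cgo noescape directives. A sketch of a file using them (the C function name here is hypothetical):

```
package demo

/*
#cgo noescape do_work
#cgo nocallback do_work
void do_work(char* buf) { (void)buf; }
*/
import "C"
import "unsafe"

// DoWork promises cgo that do_work neither retains buf past the call
// (noescape) nor calls back into Go (nocallback), which permits a
// cheaper call sequence. buf must be non-empty.
func DoWork(buf []byte) {
	C.do_work((*C.char)(unsafe.Pointer(&buf[0])))
}
```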
-func fatalf(msg string, args ...interface{}) { +func fatalf(msg string, args ...any) { // If we've already printed other errors, they might have // caused the fatal condition. Assume they're enough. if nerrors == 0 { @@ -86,7 +86,7 @@ func fatalf(msg string, args ...interface{}) { var nerrors int -func error_(pos token.Pos, msg string, args ...interface{}) { +func error_(pos token.Pos, msg string, args ...any) { nerrors++ if pos.IsValid() { fmt.Fprintf(os.Stderr, "%s: ", fset.Position(pos).String()) diff --git a/src/cmd/compile/README.md b/src/cmd/compile/README.md index 1089348030d..02429d56886 100644 --- a/src/cmd/compile/README.md +++ b/src/cmd/compile/README.md @@ -289,9 +289,9 @@ dependencies, so is not suitable for distributed build systems.) ``` After that, your edit/compile/test cycle can be similar to: ``` - <... make edits to cmd/compile source ...> + [... make edits to cmd/compile source ...] $ toolstash restore && go install cmd/compile # restore known good tools to build compiler - <... 'go build', 'go test', etc. ...> # use freshly built compiler + [... 'go build', 'go test', etc. ...] # use freshly built compiler ``` * toolstash also allows comparing the installed vs. stashed copy of diff --git a/src/cmd/compile/internal/abt/avlint32.go b/src/cmd/compile/internal/abt/avlint32.go index ddfca346a2f..e41a6c0ca40 100644 --- a/src/cmd/compile/internal/abt/avlint32.go +++ b/src/cmd/compile/internal/abt/avlint32.go @@ -28,7 +28,7 @@ type T struct { type node32 struct { // Standard conventions hold for left = smaller, right = larger left, right *node32 - data interface{} + data any key int32 height_ int8 } @@ -49,21 +49,21 @@ func (t *T) IsSingle() bool { // VisitInOrder applies f to the key and data pairs in t, // with keys ordered from smallest to largest. -func (t *T) VisitInOrder(f func(int32, interface{})) { +func (t *T) VisitInOrder(f func(int32, any)) { if t.root == nil { return } t.root.visitInOrder(f) } -func (n *node32) nilOrData() interface{} { +func (n *node32) nilOrData() any { if n == nil { return nil } return n.data } -func (n *node32) nilOrKeyAndData() (k int32, d interface{}) { +func (n *node32) nilOrKeyAndData() (k int32, d any) { if n == nil { k = NOT_KEY32 d = nil @@ -83,7 +83,7 @@ func (n *node32) height() int8 { // Find returns the data associated with x in the tree, or // nil if x is not in the tree. -func (t *T) Find(x int32) interface{} { +func (t *T) Find(x int32) any { return t.root.find(x).nilOrData() } @@ -92,7 +92,7 @@ func (t *T) Find(x int32) interface{} { // x was already a key in the tree. The previous data associated // with x is returned, and is nil if x was not previously a // key in the tree. 
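Most of the churn in this CL is the mechanical interface{} -> any rewrite seen above and below. Since any is a predeclared alias, the two spellings name the identical type, as this small program demonstrates:

```
package main

import "fmt"

func oldStyle(args ...interface{}) int { return len(args) }
func newStyle(args ...any) int         { return len(args) }

func main() {
	// any is an alias for interface{}, so the function types are
	// identical and mutually assignable.
	var f func(...interface{}) int = newStyle
	fmt.Println(f(1, "two", 3.0), oldStyle(1, "two", 3.0)) // 3 3
}
```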
-func (t *T) Insert(x int32, data interface{}) interface{} { +func (t *T) Insert(x int32, data any) any { if x == NOT_KEY32 { panic("Cannot use sentinel value -0x80000000 as key") } @@ -105,7 +105,7 @@ func (t *T) Insert(x int32, data interface{}) interface{} { } else { newroot, n, o = n.aInsert(x) } - var r interface{} + var r any if o != nil { r = o.data } else { @@ -121,7 +121,7 @@ func (t *T) Copy() *T { return &u } -func (t *T) Delete(x int32) interface{} { +func (t *T) Delete(x int32) any { n := t.root if n == nil { return nil @@ -135,7 +135,7 @@ func (t *T) Delete(x int32) interface{} { return d.data } -func (t *T) DeleteMin() (int32, interface{}) { +func (t *T) DeleteMin() (int32, any) { n := t.root if n == nil { return NOT_KEY32, nil @@ -149,7 +149,7 @@ func (t *T) DeleteMin() (int32, interface{}) { return d.key, d.data } -func (t *T) DeleteMax() (int32, interface{}) { +func (t *T) DeleteMax() (int32, any) { n := t.root if n == nil { return NOT_KEY32, nil @@ -172,7 +172,7 @@ func (t *T) Size() int { // not be symmetric. If f returns nil, then the key and data are not // added to the result. If f itself is nil, then whatever value was // already present in the smaller set is used. -func (t *T) Intersection(u *T, f func(x, y interface{}) interface{}) *T { +func (t *T) Intersection(u *T, f func(x, y any) any) *T { if t.Size() == 0 || u.Size() == 0 { return &T{} } @@ -227,7 +227,7 @@ func (t *T) Intersection(u *T, f func(x, y interface{}) interface{}) *T { // is given by f(t's data, u's data) -- f need not be symmetric. If f returns nil, // then the key and data are not added to the result. If f itself is nil, then // whatever value was already present in the larger set is used. -func (t *T) Union(u *T, f func(x, y interface{}) interface{}) *T { +func (t *T) Union(u *T, f func(x, y any) any) *T { if t.Size() == 0 { return u } @@ -284,7 +284,7 @@ func (t *T) Union(u *T, f func(x, y interface{}) interface{}) *T { // of f applied to data corresponding to equal keys. If f returns nil // (or if f is nil) then the key+data are excluded, as usual. If f // returns not-nil, then that key+data pair is inserted. instead. -func (t *T) Difference(u *T, f func(x, y interface{}) interface{}) *T { +func (t *T) Difference(u *T, f func(x, y any) any) *T { if t.Size() == 0 { return &T{} } @@ -365,7 +365,7 @@ func (t *node32) equals(u *node32) bool { return it.done() == iu.done() } -func (t *T) Equiv(u *T, eqv func(x, y interface{}) bool) bool { +func (t *T) Equiv(u *T, eqv func(x, y any) bool) bool { if t == u { return true } @@ -375,7 +375,7 @@ func (t *T) Equiv(u *T, eqv func(x, y interface{}) bool) bool { return t.root.equiv(u.root, eqv) } -func (t *node32) equiv(u *node32, eqv func(x, y interface{}) bool) bool { +func (t *node32) equiv(u *node32, eqv func(x, y any) bool) bool { if t == u { return true } @@ -404,7 +404,7 @@ type Iterator struct { it iterator } -func (it *Iterator) Next() (int32, interface{}) { +func (it *Iterator) Next() (int32, any) { x := it.it.next() if x == nil { return NOT_KEY32, nil @@ -461,37 +461,37 @@ func (it *iterator) next() *node32 { // Min returns the minimum element of t. // If t is empty, then (NOT_KEY32, nil) is returned. -func (t *T) Min() (k int32, d interface{}) { +func (t *T) Min() (k int32, d any) { return t.root.min().nilOrKeyAndData() } // Max returns the maximum element of t. // If t is empty, then (NOT_KEY32, nil) is returned. 
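For orientation, a hypothetical use of the abt API as migrated above (abt is internal to cmd/compile, so this compiles only inside the compiler tree and is illustrative only):

```
package demo

import "cmd/compile/internal/abt"

func demo() (int32, any) {
	t := &abt.T{}
	t.Insert(10, "ten")
	t.Insert(20, "twenty")
	prev := t.Insert(10, "TEN") // returns previous data for key 10: "ten"
	_ = prev
	u := t.Copy() // applicative copy; deletions in u don't affect t
	u.Delete(20)
	return t.Min() // (10, "TEN")
}
```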
-func (t *T) Max() (k int32, d interface{}) { +func (t *T) Max() (k int32, d any) { return t.root.max().nilOrKeyAndData() } // Glb returns the greatest-lower-bound-exclusive of x and the associated // data. If x has no glb in the tree, then (NOT_KEY32, nil) is returned. -func (t *T) Glb(x int32) (k int32, d interface{}) { +func (t *T) Glb(x int32) (k int32, d any) { return t.root.glb(x, false).nilOrKeyAndData() } // GlbEq returns the greatest-lower-bound-inclusive of x and the associated // data. If x has no glbEQ in the tree, then (NOT_KEY32, nil) is returned. -func (t *T) GlbEq(x int32) (k int32, d interface{}) { +func (t *T) GlbEq(x int32) (k int32, d any) { return t.root.glb(x, true).nilOrKeyAndData() } // Lub returns the least-upper-bound-exclusive of x and the associated // data. If x has no lub in the tree, then (NOT_KEY32, nil) is returned. -func (t *T) Lub(x int32) (k int32, d interface{}) { +func (t *T) Lub(x int32) (k int32, d any) { return t.root.lub(x, false).nilOrKeyAndData() } // LubEq returns the least-upper-bound-inclusive of x and the associated // data. If x has no lubEq in the tree, then (NOT_KEY32, nil) is returned. -func (t *T) LubEq(x int32) (k int32, d interface{}) { +func (t *T) LubEq(x int32) (k int32, d any) { return t.root.lub(x, true).nilOrKeyAndData() } @@ -499,7 +499,7 @@ func (t *node32) isLeaf() bool { return t.left == nil && t.right == nil && t.height_ == LEAF_HEIGHT } -func (t *node32) visitInOrder(f func(int32, interface{})) { +func (t *node32) visitInOrder(f func(int32, any)) { if t.left != nil { t.left.visitInOrder(f) } diff --git a/src/cmd/compile/internal/abt/avlint32_test.go b/src/cmd/compile/internal/abt/avlint32_test.go index 7fa9ed4fd68..71962445f2b 100644 --- a/src/cmd/compile/internal/abt/avlint32_test.go +++ b/src/cmd/compile/internal/abt/avlint32_test.go @@ -317,7 +317,7 @@ func applicIterator(te *testing.T, x []int32) { } } -func equiv(a, b interface{}) bool { +func equiv(a, b any) bool { sa, sb := a.(*sstring), b.(*sstring) return *sa == *sb } @@ -450,16 +450,16 @@ func TestEquals(t *testing.T) { []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2}) } -func first(x, y interface{}) interface{} { +func first(x, y any) any { return x } -func second(x, y interface{}) interface{} { +func second(x, y any) any { return y } -func alwaysNil(x, y interface{}) interface{} { +func alwaysNil(x, y any) any { return nil } -func smaller(x, y interface{}) interface{} { +func smaller(x, y any) any { xi, _ := strconv.Atoi(fmt.Sprint(x)) yi, _ := strconv.Atoi(fmt.Sprint(y)) if xi < yi { @@ -560,7 +560,7 @@ func (s *sstring) String() string { return s.s } -func stringer(s string) interface{} { +func stringer(s string) any { return &sstring{s} } diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go index a3bfb491b8b..b31ffa474bc 100644 --- a/src/cmd/compile/internal/arm/ssa.go +++ b/src/cmd/compile/internal/arm/ssa.go @@ -245,6 +245,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = r case ssa.OpARMADDS, + ssa.OpARMADCS, ssa.OpARMSUBS: r := v.Reg0() r1 := v.Args[0].Reg() diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go index 7bc0e536e94..43ecb6b4b71 100644 --- a/src/cmd/compile/internal/arm64/ssa.go +++ b/src/cmd/compile/internal/arm64/ssa.go @@ -1189,8 +1189,9 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { if dstReg == srcReg { break } - tmpReg1 := int16(arm64.REG_R24) - tmpReg2 := 
int16(arm64.REG_R25) + tmpReg1 := int16(arm64.REG_R25) + tmpFReg1 := int16(arm64.REG_F16) + tmpFReg2 := int16(arm64.REG_F17) n := v.AuxInt if n < 16 { v.Fatalf("Move too small %d", n) @@ -1198,10 +1199,17 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // Generate copying instructions. var off int64 + for n >= 32 { + // FLDPQ off(srcReg), (tmpFReg1, tmpFReg2) + // FSTPQ (tmpFReg1, tmpFReg2), off(dstReg) + move32(s, srcReg, dstReg, tmpFReg1, tmpFReg2, off, false) + off += 32 + n -= 32 + } for n >= 16 { - // LDP off(srcReg), (tmpReg1, tmpReg2) - // STP (tmpReg1, tmpReg2), off(dstReg) - move16(s, srcReg, dstReg, tmpReg1, tmpReg2, off, false) + // FMOVQ off(src), tmpFReg1 + // FMOVQ tmpFReg1, off(dst) + move16(s, srcReg, dstReg, tmpFReg1, off, false) off += 16 n -= 16 } @@ -1223,9 +1231,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { if dstReg == srcReg { break } - countReg := int16(arm64.REG_R23) - tmpReg1 := int16(arm64.REG_R24) - tmpReg2 := int16(arm64.REG_R25) + countReg := int16(arm64.REG_R24) + tmpReg1 := int16(arm64.REG_R25) + tmpFReg1 := int16(arm64.REG_F16) + tmpFReg2 := int16(arm64.REG_F17) n := v.AuxInt loopSize := int64(64) if n < 3*loopSize { @@ -1251,10 +1260,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // Move loopSize bytes starting at srcReg to dstReg. // Increment srcReg and destReg by loopSize as a side effect. - for range loopSize / 16 { - // LDP.P 16(srcReg), (tmpReg1, tmpReg2) - // STP.P (tmpReg1, tmpReg2), 16(dstReg) - move16(s, srcReg, dstReg, tmpReg1, tmpReg2, 0, true) + for range loopSize / 32 { + // FLDPQ.P 32(srcReg), (tmpFReg1, tmpFReg2) + // FSTPQ.P (tmpFReg1, tmpFReg2), 32(dstReg) + move32(s, srcReg, dstReg, tmpFReg1, tmpFReg2, 0, true) } // Decrement loop count. // SUB $1, countReg @@ -1276,10 +1285,17 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // Copy any fractional portion. var off int64 + for n >= 32 { + // FLDPQ off(srcReg), (tmpFReg1, tmpFReg2) + // FSTPQ (tmpFReg1, tmpFReg2), off(dstReg) + move32(s, srcReg, dstReg, tmpFReg1, tmpFReg2, off, false) + off += 32 + n -= 32 + } for n >= 16 { - // LDP off(srcReg), (tmpReg1, tmpReg2) - // STP (tmpReg1, tmpReg2), off(dstReg) - move16(s, srcReg, dstReg, tmpReg1, tmpReg2, off, false) + // FMOVQ off(src), tmpFReg1 + // FMOVQ tmpFReg1, off(dst) + move16(s, srcReg, dstReg, tmpFReg1, off, false) off += 16 n -= 16 } @@ -1699,26 +1715,55 @@ func zero8(s *ssagen.State, reg int16, off int64) { p.To.Offset = off } -// move16 copies 16 bytes at src+off to dst+off. +// move32 copies 32 bytes at src+off to dst+off. // Uses registers tmp1 and tmp2. -// If postInc is true, increment src and dst by 16. -func move16(s *ssagen.State, src, dst, tmp1, tmp2 int16, off int64, postInc bool) { - // LDP off(src), (tmp1, tmp2) - ld := s.Prog(arm64.ALDP) +// If postInc is true, increment src and dst by 32. 
+func move32(s *ssagen.State, src, dst, tmp1, tmp2 int16, off int64, postInc bool) { + // FLDPQ off(src), (tmp1, tmp2) + ld := s.Prog(arm64.AFLDPQ) ld.From.Type = obj.TYPE_MEM ld.From.Reg = src ld.From.Offset = off ld.To.Type = obj.TYPE_REGREG ld.To.Reg = tmp1 ld.To.Offset = int64(tmp2) - // STP (tmp1, tmp2), off(dst) - st := s.Prog(arm64.ASTP) + // FSTPQ (tmp1, tmp2), off(dst) + st := s.Prog(arm64.AFSTPQ) st.From.Type = obj.TYPE_REGREG st.From.Reg = tmp1 st.From.Offset = int64(tmp2) st.To.Type = obj.TYPE_MEM st.To.Reg = dst st.To.Offset = off + if postInc { + if off != 0 { + panic("can't postinc with non-zero offset") + } + ld.Scond = arm64.C_XPOST + st.Scond = arm64.C_XPOST + ld.From.Offset = 32 + st.To.Offset = 32 + } +} + +// move16 copies 16 bytes at src+off to dst+off. +// Uses register tmp1 +// If postInc is true, increment src and dst by 16. +func move16(s *ssagen.State, src, dst, tmp1 int16, off int64, postInc bool) { + // FMOVQ off(src), tmp1 + ld := s.Prog(arm64.AFMOVQ) + ld.From.Type = obj.TYPE_MEM + ld.From.Reg = src + ld.From.Offset = off + ld.To.Type = obj.TYPE_REG + ld.To.Reg = tmp1 + // FMOVQ tmp1, off(dst) + st := s.Prog(arm64.AFMOVQ) + st.From.Type = obj.TYPE_REG + st.From.Reg = tmp1 + st.To.Type = obj.TYPE_MEM + st.To.Reg = dst + st.To.Offset = off if postInc { if off != 0 { panic("can't postinc with non-zero offset") diff --git a/src/cmd/compile/internal/base/debug.go b/src/cmd/compile/internal/base/debug.go index 85873dcc40e..9e8ab2f488b 100644 --- a/src/cmd/compile/internal/base/debug.go +++ b/src/cmd/compile/internal/base/debug.go @@ -20,6 +20,7 @@ type DebugFlags struct { Append int `help:"print information about append compilation"` Checkptr int `help:"instrument unsafe pointer conversions\n0: instrumentation disabled\n1: conversions involving unsafe.Pointer are instrumented\n2: conversions to unsafe.Pointer force heap allocation" concurrent:"ok"` Closure int `help:"print information about closure compilation"` + Converthash string `help:"hash value for use in debugging changes to platform-dependent float-to-[u]int conversion" concurrent:"ok"` Defer int `help:"print information about defer compilation"` DisableNil int `help:"disable nil checks" concurrent:"ok"` DumpInlFuncProps string `help:"dump function properties from inl heuristics to specified file"` diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go index a0ed876cfc8..1d211e0a2dd 100644 --- a/src/cmd/compile/internal/base/flag.go +++ b/src/cmd/compile/internal/base/flag.go @@ -262,6 +262,12 @@ func ParseFlags() { Debug.LoopVar = 1 } + if Debug.Converthash != "" { + ConvertHash = NewHashDebug("converthash", Debug.Converthash, nil) + } else { + // quietly disable the convert hash changes + ConvertHash = NewHashDebug("converthash", "qn", nil) + } if Debug.Fmahash != "" { FmaHash = NewHashDebug("fmahash", Debug.Fmahash, nil) } diff --git a/src/cmd/compile/internal/base/hashdebug.go b/src/cmd/compile/internal/base/hashdebug.go index fa63deb46a3..edf567457cb 100644 --- a/src/cmd/compile/internal/base/hashdebug.go +++ b/src/cmd/compile/internal/base/hashdebug.go @@ -53,6 +53,7 @@ func (d *HashDebug) SetInlineSuffixOnly(b bool) *HashDebug { // The default compiler-debugging HashDebug, for "-d=gossahash=..." 
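On the new Converthash flag wired up above: hash-debug flags are consulted at individual decision points so that a binary search over hash patterns can isolate a single miscompiled site. A sketch, assuming ConvertHash is queried the same way as the sibling FmaHash/LoopVarHash flags (the MatchPos call mirrors their existing usage; treating it as applicable here is an assumption):

```
// Inside the compiler, at the site of the changed conversion lowering:
if base.ConvertHash.MatchPos(n.Pos(), nil) {
	// take the new platform-dependent float-to-[u]int lowering here
}
// Then -gcflags=-d=converthash=PATTERN bisects a miscompile down to a
// single conversion site; the default "qn" set in ParseFlags quietly
// disables the new behavior everywhere.
```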
var hashDebug *HashDebug +var ConvertHash *HashDebug // for debugging float-to-[u]int conversion changes var FmaHash *HashDebug // for debugging fused-multiply-add floating point changes var LoopVarHash *HashDebug // for debugging shared/private loop variable changes var PGOHash *HashDebug // for debugging PGO optimization decisions diff --git a/src/cmd/compile/internal/base/print.go b/src/cmd/compile/internal/base/print.go index 9e3348c1ecc..6bfc84cd62d 100644 --- a/src/cmd/compile/internal/base/print.go +++ b/src/cmd/compile/internal/base/print.go @@ -45,7 +45,7 @@ func SyntaxErrors() int { } // addErrorMsg adds a new errorMsg (which may be a warning) to errorMsgs. -func addErrorMsg(pos src.XPos, code errors.Code, format string, args ...interface{}) { +func addErrorMsg(pos src.XPos, code errors.Code, format string, args ...any) { msg := fmt.Sprintf(format, args...) // Only add the position if know the position. // See issue golang.org/issue/11361. @@ -108,12 +108,12 @@ func sameline(a, b src.XPos) bool { } // Errorf reports a formatted error at the current line. -func Errorf(format string, args ...interface{}) { +func Errorf(format string, args ...any) { ErrorfAt(Pos, 0, format, args...) } // ErrorfAt reports a formatted error message at pos. -func ErrorfAt(pos src.XPos, code errors.Code, format string, args ...interface{}) { +func ErrorfAt(pos src.XPos, code errors.Code, format string, args ...any) { msg := fmt.Sprintf(format, args...) if strings.HasPrefix(msg, "syntax error") { @@ -164,7 +164,7 @@ func UpdateErrorDot(line string, name, expr string) { // In general the Go compiler does NOT generate warnings, // so this should be used only when the user has opted in // to additional output by setting a particular flag. -func Warn(format string, args ...interface{}) { +func Warn(format string, args ...any) { WarnfAt(Pos, format, args...) } @@ -172,7 +172,7 @@ func Warn(format string, args ...interface{}) { // In general the Go compiler does NOT generate warnings, // so this should be used only when the user has opted in // to additional output by setting a particular flag. -func WarnfAt(pos src.XPos, format string, args ...interface{}) { +func WarnfAt(pos src.XPos, format string, args ...any) { addErrorMsg(pos, 0, format, args...) if Flag.LowerM != 0 { FlushErrors() @@ -191,7 +191,7 @@ func WarnfAt(pos src.XPos, format string, args ...interface{}) { // prints a stack trace. // // If -h has been specified, Fatalf panics to force the usual runtime info dump. -func Fatalf(format string, args ...interface{}) { +func Fatalf(format string, args ...any) { FatalfAt(Pos, format, args...) } @@ -209,7 +209,7 @@ var bugStack = counter.NewStack("compile/bug", 16) // 16 is arbitrary; used by g // prints a stack trace. // // If -h has been specified, FatalfAt panics to force the usual runtime info dump. -func FatalfAt(pos src.XPos, format string, args ...interface{}) { +func FatalfAt(pos src.XPos, format string, args ...any) { FlushErrors() bugStack.Inc() @@ -244,14 +244,14 @@ func Assert(b bool) { } // Assertf reports a fatal error with Fatalf, unless b is true. -func Assertf(b bool, format string, args ...interface{}) { +func Assertf(b bool, format string, args ...any) { if !b { Fatalf(format, args...) } } // AssertfAt reports a fatal error with FatalfAt, unless b is true. -func AssertfAt(b bool, pos src.XPos, format string, args ...interface{}) { +func AssertfAt(b bool, pos src.XPos, format string, args ...any) { if !b { FatalfAt(pos, format, args...) 
} diff --git a/src/cmd/compile/internal/base/timings.go b/src/cmd/compile/internal/base/timings.go index f48ac93699b..cbcd4dc6f55 100644 --- a/src/cmd/compile/internal/base/timings.go +++ b/src/cmd/compile/internal/base/timings.go @@ -168,7 +168,7 @@ type lines [][]string func (lines *lines) add(label string, n int, dt, tot time.Duration, events []*event) { var line []string - add := func(format string, args ...interface{}) { + add := func(format string, args ...any) { line = append(line, fmt.Sprintf(format, args...)) } diff --git a/src/cmd/compile/internal/bitvec/bv.go b/src/cmd/compile/internal/bitvec/bv.go index d3133dcd2dc..9214aa6cd05 100644 --- a/src/cmd/compile/internal/bitvec/bv.go +++ b/src/cmd/compile/internal/bitvec/bv.go @@ -93,7 +93,7 @@ func (bv BitVec) Unset(i int32) { bv.B[i/wordBits] &^= mask } -// bvnext returns the smallest index >= i for which bvget(bv, i) == 1. +// Next returns the smallest index >= i for which bvget(bv, i) == 1. // If there is no such index, Next returns -1. func (bv BitVec) Next(i int32) int32 { if i >= bv.N { diff --git a/src/cmd/compile/internal/coverage/cover.go b/src/cmd/compile/internal/coverage/cover.go index 51f934f0600..5ecd5271f61 100644 --- a/src/cmd/compile/internal/coverage/cover.go +++ b/src/cmd/compile/internal/coverage/cover.go @@ -131,7 +131,7 @@ func metaHashAndLen() ([16]byte, int) { } var hv [16]byte for i := 0; i < 16; i++ { - nib := string(mhash[i*2 : i*2+2]) + nib := mhash[i*2 : i*2+2] x, err := strconv.ParseInt(nib, 16, 32) if err != nil { base.Fatalf("metahash bad byte %q", nib) diff --git a/src/cmd/compile/internal/devirtualize/devirtualize.go b/src/cmd/compile/internal/devirtualize/devirtualize.go index 372d0580940..cb4608a0246 100644 --- a/src/cmd/compile/internal/devirtualize/devirtualize.go +++ b/src/cmd/compile/internal/devirtualize/devirtualize.go @@ -18,9 +18,11 @@ import ( "cmd/compile/internal/types" ) +const go126ImprovedConcreteTypeAnalysis = true + // StaticCall devirtualizes the given call if possible when the concrete callee // is available statically. -func StaticCall(call *ir.CallExpr) { +func StaticCall(s *State, call *ir.CallExpr) { // For promoted methods (including value-receiver methods promoted // to pointer-receivers), the interface method wrapper may contain // expressions that can panic (e.g., ODEREF, ODOTPTR, @@ -40,15 +42,31 @@ func StaticCall(call *ir.CallExpr) { } sel := call.Fun.(*ir.SelectorExpr) - r := ir.StaticValue(sel.X) - if r.Op() != ir.OCONVIFACE { - return - } - recv := r.(*ir.ConvExpr) + var typ *types.Type + if go126ImprovedConcreteTypeAnalysis { + typ = concreteType(s, sel.X) + if typ == nil { + return + } - typ := recv.X.Type() - if typ.IsInterface() { - return + // Don't create type-assertions that would be impossible at compile-time. + // This can happen in a case such as any(0).(interface {A()}).A(), which typechecks without + // any errors but will cause a runtime panic. We statically know that int(0) does not + // implement that interface, thus we skip the devirtualization, as it is not possible + // to make an assertion: any(0).(interface{A()}).(int) (int does not implement interface{A()}).
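To make the impossible-assertion comment concrete, here is a runnable version of its example (not part of the patch):

```
package main

import "fmt"

func main() {
	defer func() { fmt.Println("recovered:", recover()) }()
	// Typechecks, but int does not implement interface{ A() },
	// so this must panic at run time; rewriting it into a
	// concrete-type assertion on int would be wrong.
	any(0).(interface{ A() }).A()
}
```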
+ if !typecheck.Implements(typ, sel.X.Type()) { + return + } + } else { + r := ir.StaticValue(sel.X) + if r.Op() != ir.OCONVIFACE { + return + } + recv := r.(*ir.ConvExpr) + typ = recv.X.Type() + if typ.IsInterface() { + return + } } // If typ is a shape type, then it was a type argument originally @@ -99,8 +117,27 @@ func StaticCall(call *ir.CallExpr) { return } - dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, nil) - dt.SetType(typ) + dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, typ) + + if go126ImprovedConcreteTypeAnalysis { + // Consider: + // + // var v Iface + // v.A() + // v = &Impl{} + // + // Here in the devirtualizer, we determine the concrete type of v as being an *Impl, + // but it can still be a nil interface; we have not detected that. The v.(*Impl) + // type assertion that we make here would also have failed, but with a different + // panic "pkg.Iface is nil, not *pkg.Impl", where previously we would get a nil panic. + // We fix this by introducing an additional nilcheck on the itab. + // Calling a method on a nil interface (in most cases) is a bug in a program, so it is fine + // to devirtualize and further (possibly) inline them, even though we would never reach + // the called function. + dt.UseNilPanic = true + dt.SetPos(call.Pos()) + } + x := typecheck.XDotMethod(sel.Pos(), dt, sel.Sel, true) switch x.Op() { case ir.ODOTMETH: @@ -138,3 +175,413 @@ func StaticCall(call *ir.CallExpr) { // Desugar OCALLMETH, if we created one (#57309). typecheck.FixMethodCall(call) } + +const concreteTypeDebug = false + +// concreteType determines the concrete type of n, following OCONVIFACEs and type asserts. +// Returns nil when the concrete type could not be determined, or when there are multiple +// (different) types assigned to an interface. +func concreteType(s *State, n ir.Node) (typ *types.Type) { + typ = concreteType1(s, n, make(map[*ir.Name]struct{})) + if typ == &noType { + return nil + } + if typ != nil && typ.IsInterface() { + base.FatalfAt(n.Pos(), "typ.IsInterface() = true; want = false; typ = %v", typ) + } + return typ +} + +// noType is a sentinel value returned by [concreteType1]. +var noType types.Type + +// concreteType1 analyzes the node n and returns its concrete type if it is statically known. +// Otherwise, it returns a nil Type, indicating that a concrete type was not determined. +// When n is known to be statically nil or a self-assignment is detected, it returns a sentinel [noType] type instead. +func concreteType1(s *State, n ir.Node, seen map[*ir.Name]struct{}) (outT *types.Type) { + nn := n // for debug messages + + if concreteTypeDebug { + defer func() { + t := "&noType" + if outT != &noType { + t = outT.String() + } + base.Warn("concreteType1(%v) -> %v", nn, t) + }() + } + + for { + if concreteTypeDebug { + base.Warn("concreteType1(%v): analyzing %v", nn, n) + } + + if !n.Type().IsInterface() { + return n.Type() + } + + switch n1 := n.(type) { + case *ir.ConvExpr: + if n1.Op() == ir.OCONVNOP { + if !n1.Type().IsInterface() || !types.Identical(n1.Type().Underlying(), n1.X.Type().Underlying()) { + // Since we check (directly before this switch) whether n is an interface, we should only reach + // here for iface conversions where both operands are the same.
+ base.FatalfAt(n1.Pos(), "not identical/interface types found n1.Type = %v; n1.X.Type = %v", n1.Type(), n1.X.Type()) + } + n = n1.X + continue + } + if n1.Op() == ir.OCONVIFACE { + n = n1.X + continue + } + case *ir.InlinedCallExpr: + if n1.Op() == ir.OINLCALL { + n = n1.SingleResult() + continue + } + case *ir.ParenExpr: + n = n1.X + continue + case *ir.TypeAssertExpr: + n = n1.X + continue + } + break + } + + if n.Op() != ir.ONAME { + return nil + } + + name := n.(*ir.Name).Canonical() + if name.Class != ir.PAUTO { + return nil + } + + if name.Op() != ir.ONAME { + base.FatalfAt(name.Pos(), "name.Op = %v; want = ONAME", n.Op()) + } + + // name.Curfn must be set, as we checked name.Class != ir.PAUTO before. + if name.Curfn == nil { + base.FatalfAt(name.Pos(), "name.Curfn = nil; want not nil") + } + + if name.Addrtaken() { + return nil // conservatively assume it's reassigned with a different type indirectly + } + + if _, ok := seen[name]; ok { + return &noType // Already analyzed assignments to name, no need to do that twice. + } + seen[name] = struct{}{} + + if concreteTypeDebug { + base.Warn("concreteType1(%v): analyzing assignments to %v", nn, name) + } + + var typ *types.Type + for _, v := range s.assignments(name) { + var t *types.Type + switch v := v.(type) { + case *types.Type: + t = v + case ir.Node: + t = concreteType1(s, v, seen) + if t == &noType { + continue + } + } + if t == nil || (typ != nil && !types.Identical(typ, t)) { + return nil + } + typ = t + } + + if typ == nil { + // The variable was either declared with its zero value, or only ever assigned nil. + return &noType + } + + return typ +} + +// assignment can be one of: +// - nil - assignment from an interface type. +// - *types.Type - assignment from a concrete type (non-interface). +// - ir.Node - assignment from an ir.Node. +// +// In most cases assignment should be an [ir.Node], but in cases where we +// do not follow the data-flow, we return either a concrete type (*types.Type) or nil. +// For example in range over a slice, if the slice elem is of an interface type, then we return +// nil, otherwise the elem's concrete type (we do so because we do not analyze assignments to the +// slice being ranged-over). +type assignment any + +// State holds precomputed state for use in [StaticCall]. +type State struct { + // ifaceAssignments maps interface variables to all their assignments + // defined inside functions stored in the analyzedFuncs set. + // Note: it does not include direct assignments to nil. + ifaceAssignments map[*ir.Name][]assignment + + // ifaceCallExprAssigns stores every [*ir.CallExpr] that has an interface + // result and is assigned to a variable. + ifaceCallExprAssigns map[*ir.CallExpr][]ifaceAssignRef + + // analyzedFuncs is a set of Funcs that were analyzed for iface assignments. + analyzedFuncs map[*ir.Func]struct{} +} + +type ifaceAssignRef struct { + name *ir.Name // ifaceAssignments[name] + assignmentIndex int // ifaceAssignments[name][assignmentIndex] + returnIndex int // (*ir.CallExpr).Result(returnIndex) +} + +// InlinedCall updates the [State] to take into account a newly inlined call. +func (s *State) InlinedCall(fun *ir.Func, origCall *ir.CallExpr, inlinedCall *ir.InlinedCallExpr) { + if _, ok := s.analyzedFuncs[fun]; !ok { + // A full analysis has not yet been executed for the provided function, so we can skip it for now. + // When no devirtualization happens in a function, it is unnecessary to analyze it. + return + } + + // Analyze assignments in the newly inlined function.
+ s.analyze(inlinedCall.Init()) + s.analyze(inlinedCall.Body) + + refs, ok := s.ifaceCallExprAssigns[origCall] + if !ok { + return + } + delete(s.ifaceCallExprAssigns, origCall) + + // Update assignments to reference the new ReturnVars of the inlined call. + for _, ref := range refs { + vt := &s.ifaceAssignments[ref.name][ref.assignmentIndex] + if *vt != nil { + base.Fatalf("unexpected non-nil assignment") + } + if concreteTypeDebug { + base.Warn( + "InlinedCall(%v, %v): replacing interface node in (%v,%v) to %v (typ %v)", + origCall, inlinedCall, ref.name, ref.assignmentIndex, + inlinedCall.ReturnVars[ref.returnIndex], + inlinedCall.ReturnVars[ref.returnIndex].Type(), + ) + } + + // Update ifaceAssignments with an ir.Node from the inlined function’s ReturnVars. + // This may enable future devirtualization of calls that reference ref.name. + // We will get calls to [StaticCall] from the interleaved package, + // to try devirtualize such calls afterwards. + *vt = inlinedCall.ReturnVars[ref.returnIndex] + } +} + +// assignments returns all assignments to n. +func (s *State) assignments(n *ir.Name) []assignment { + fun := n.Curfn + if fun == nil { + base.FatalfAt(n.Pos(), "n.Curfn = ") + } + if n.Class != ir.PAUTO { + base.FatalfAt(n.Pos(), "n.Class = %v; want = PAUTO", n.Class) + } + + if !n.Type().IsInterface() { + base.FatalfAt(n.Pos(), "name passed to assignments is not of an interface type: %v", n.Type()) + } + + // Analyze assignments in func, if not analyzed before. + if _, ok := s.analyzedFuncs[fun]; !ok { + if concreteTypeDebug { + base.Warn("assignments(): analyzing assignments in %v func", fun) + } + if s.analyzedFuncs == nil { + s.ifaceAssignments = make(map[*ir.Name][]assignment) + s.ifaceCallExprAssigns = make(map[*ir.CallExpr][]ifaceAssignRef) + s.analyzedFuncs = make(map[*ir.Func]struct{}) + } + s.analyzedFuncs[fun] = struct{}{} + s.analyze(fun.Init()) + s.analyze(fun.Body) + } + + return s.ifaceAssignments[n] +} + +// analyze analyzes every assignment to interface variables in nodes, updating [State]. +func (s *State) analyze(nodes ir.Nodes) { + assign := func(name ir.Node, assignment assignment) (*ir.Name, int) { + if name == nil || name.Op() != ir.ONAME || ir.IsBlank(name) { + return nil, -1 + } + + n, ok := ir.OuterValue(name).(*ir.Name) + if !ok || n.Curfn == nil { + return nil, -1 + } + + // Do not track variables that are not of interface types. + // For devirtualization they are unnecessary, we will not even look them up. + if !n.Type().IsInterface() { + return nil, -1 + } + + n = n.Canonical() + if n.Op() != ir.ONAME { + base.FatalfAt(n.Pos(), "n.Op = %v; want = ONAME", n.Op()) + } + if n.Class != ir.PAUTO { + return nil, -1 + } + + switch a := assignment.(type) { + case nil: + case *types.Type: + if a != nil && a.IsInterface() { + assignment = nil // non-concrete type + } + case ir.Node: + // nil assignment, we can safely ignore them, see [StaticCall]. 
+ if ir.IsNil(a) { + return nil, -1 + } + default: + base.Fatalf("unexpected type: %v", assignment) + } + + if concreteTypeDebug { + base.Warn("analyze(): assignment found %v = %v", name, assignment) + } + + s.ifaceAssignments[n] = append(s.ifaceAssignments[n], assignment) + return n, len(s.ifaceAssignments[n]) - 1 + } + + var do func(n ir.Node) + do = func(n ir.Node) { + switch n.Op() { + case ir.OAS: + n := n.(*ir.AssignStmt) + if rhs := n.Y; rhs != nil { + for { + if r, ok := rhs.(*ir.ParenExpr); ok { + rhs = r.X + continue + } + break + } + if call, ok := rhs.(*ir.CallExpr); ok && call.Fun != nil { + retTyp := call.Fun.Type().Results()[0].Type + n, idx := assign(n.X, retTyp) + if n != nil && retTyp.IsInterface() { + // We have a call expression, that returns an interface, store it for later evaluation. + // In case this func gets inlined later, we will update the assignment (added before) + // with a reference to ReturnVars, see [State.InlinedCall], which might allow for future devirtualizing of n.X. + s.ifaceCallExprAssigns[call] = append(s.ifaceCallExprAssigns[call], ifaceAssignRef{n, idx, 0}) + } + } else { + assign(n.X, rhs) + } + } + case ir.OAS2: + n := n.(*ir.AssignListStmt) + for i, p := range n.Lhs { + if n.Rhs[i] != nil { + assign(p, n.Rhs[i]) + } + } + case ir.OAS2DOTTYPE: + n := n.(*ir.AssignListStmt) + if n.Rhs[0] == nil { + base.FatalfAt(n.Pos(), "n.Rhs[0] == nil; n = %v", n) + } + assign(n.Lhs[0], n.Rhs[0]) + assign(n.Lhs[1], nil) // boolean does not have methods to devirtualize + case ir.OAS2MAPR, ir.OAS2RECV, ir.OSELRECV2: + n := n.(*ir.AssignListStmt) + if n.Rhs[0] == nil { + base.FatalfAt(n.Pos(), "n.Rhs[0] == nil; n = %v", n) + } + assign(n.Lhs[0], n.Rhs[0].Type()) + assign(n.Lhs[1], nil) // boolean does not have methods to devirtualize + case ir.OAS2FUNC: + n := n.(*ir.AssignListStmt) + rhs := n.Rhs[0] + for { + if r, ok := rhs.(*ir.ParenExpr); ok { + rhs = r.X + continue + } + break + } + if call, ok := rhs.(*ir.CallExpr); ok { + for i, p := range n.Lhs { + retTyp := call.Fun.Type().Results()[i].Type + n, idx := assign(p, retTyp) + if n != nil && retTyp.IsInterface() { + // We have a call expression, that returns an interface, store it for later evaluation. + // In case this func gets inlined later, we will update the assignment (added before) + // with a reference to ReturnVars, see [State.InlinedCall], which might allow for future devirtualizing of n.X. + s.ifaceCallExprAssigns[call] = append(s.ifaceCallExprAssigns[call], ifaceAssignRef{n, idx, i}) + } + } + } else if call, ok := rhs.(*ir.InlinedCallExpr); ok { + for i, p := range n.Lhs { + assign(p, call.ReturnVars[i]) + } + } else { + base.FatalfAt(n.Pos(), "unexpected type %T in OAS2FUNC Rhs[0]", call) + } + case ir.ORANGE: + n := n.(*ir.RangeStmt) + xTyp := n.X.Type() + + // Range over an array pointer. + if xTyp.IsPtr() && xTyp.Elem().IsArray() { + xTyp = xTyp.Elem() + } + + if xTyp.IsArray() || xTyp.IsSlice() { + assign(n.Key, nil) // integer does not have methods to devirtualize + assign(n.Value, xTyp.Elem()) + } else if xTyp.IsChan() { + assign(n.Key, xTyp.Elem()) + base.AssertfAt(n.Value == nil, n.Pos(), "n.Value != nil in range over chan") + } else if xTyp.IsMap() { + assign(n.Key, xTyp.Key()) + assign(n.Value, xTyp.Elem()) + } else if xTyp.IsInteger() || xTyp.IsString() { + // Range over int/string, results do not have methods, so nothing to devirtualize. 
+ assign(n.Key, nil) + assign(n.Value, nil) + } else { + // We will not reach here in case of a range-over-func, as it is + // rewritten to function calls in the noder package. + base.FatalfAt(n.Pos(), "range over unexpected type %v", n.X.Type()) + } + case ir.OSWITCH: + n := n.(*ir.SwitchStmt) + if guard, ok := n.Tag.(*ir.TypeSwitchGuard); ok { + for _, v := range n.Cases { + if v.Var == nil { + base.Assert(guard.Tag == nil) + continue + } + assign(v.Var, guard.X) + } + } + case ir.OCLOSURE: + n := n.(*ir.ClosureExpr) + if _, ok := s.analyzedFuncs[n.Func]; !ok { + s.analyzedFuncs[n.Func] = struct{}{} + ir.Visit(n.Func, do) + } + } + } + ir.VisitList(nodes, do) +} diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index b1ae55cdb6b..2ce5c8accc5 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -315,7 +315,7 @@ func CanInline(fn *ir.Func, profile *pgoir.Profile) { // function is inlinable. func noteInlinableFunc(n *ir.Name, fn *ir.Func, cost int32) { if base.Flag.LowerM > 1 { - fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, cost, fn.Type(), ir.Nodes(fn.Body)) + fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, cost, fn.Type(), fn.Body) } else if base.Flag.LowerM != 0 { fmt.Printf("%v: can inline %v\n", ir.Line(fn), n) } diff --git a/src/cmd/compile/internal/inline/interleaved/interleaved.go b/src/cmd/compile/internal/inline/interleaved/interleaved.go index 954cc306fc8..c83bbdb718d 100644 --- a/src/cmd/compile/internal/inline/interleaved/interleaved.go +++ b/src/cmd/compile/internal/inline/interleaved/interleaved.go @@ -45,6 +45,8 @@ func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) { inlState := make(map[*ir.Func]*inlClosureState) calleeUseCounts := make(map[*ir.Func]int) + var state devirtualize.State + // Pre-process all the functions, adding parentheses around call sites and starting their "inl state". for _, fn := range typecheck.Target.Funcs { bigCaller := base.Flag.LowerL != 0 && inline.IsBigFunc(fn) @@ -58,7 +60,7 @@ func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) { // Do a first pass at counting call sites. for i := range s.parens { - s.resolve(i) + s.resolve(&state, i) } } @@ -102,10 +104,11 @@ func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) { for { for i := l0; i < l1; i++ { // can't use "range parens" here paren := s.parens[i] - if new := s.edit(i); new != nil { + if origCall, inlinedCall := s.edit(&state, i); inlinedCall != nil { // Update AST and recursively mark nodes. - paren.X = new - ir.EditChildren(new, s.mark) // mark may append to parens + paren.X = inlinedCall + ir.EditChildren(inlinedCall, s.mark) // mark may append to parens + state.InlinedCall(s.fn, origCall, inlinedCall) done = false } } @@ -114,7 +117,7 @@ func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) { break } for i := l0; i < l1; i++ { - s.resolve(i) + s.resolve(&state, i) } } @@ -188,7 +191,7 @@ type inlClosureState struct { // resolve attempts to resolve a call to a potentially inlineable callee // and updates use counts on the callees. Returns the call site count // for that callee.
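A small program illustrating the kind of code the new concreteType analysis can devirtualize where the old ir.StaticValue-based check gave up: multiple assignments that agree on one concrete type (a source-level illustration, not compiler code):

```
package main

type Iface interface{ A() }
type Impl struct{ n int }

func (i *Impl) A() { i.n++ }

func needsUpdate() bool { return true }

func main() {
	var v Iface = &Impl{}
	if needsUpdate() {
		v = &Impl{n: 1} // second assignment, same concrete type
	}
	// Every assignment to v agrees on *Impl, so concreteType can
	// report *Impl and the call below can be made direct, whereas
	// ir.StaticValue could not see through multiple assignments.
	v.A()
}
```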
-func (s *inlClosureState) resolve(i int) (*ir.Func, int) { +func (s *inlClosureState) resolve(state *devirtualize.State, i int) (*ir.Func, int) { p := s.parens[i] if i < len(s.resolved) { if callee := s.resolved[i]; callee != nil { @@ -200,7 +203,7 @@ func (s *inlClosureState) resolve(i int) (*ir.Func, int) { if !ok { // previously inlined return nil, -1 } - devirtualize.StaticCall(call) + devirtualize.StaticCall(state, call) if callee := inline.InlineCallTarget(s.fn, call, s.profile); callee != nil { for len(s.resolved) <= i { s.resolved = append(s.resolved, nil) @@ -213,23 +216,23 @@ func (s *inlClosureState) resolve(i int) (*ir.Func, int) { return nil, 0 } -func (s *inlClosureState) edit(i int) ir.Node { +func (s *inlClosureState) edit(state *devirtualize.State, i int) (*ir.CallExpr, *ir.InlinedCallExpr) { n := s.parens[i].X call, ok := n.(*ir.CallExpr) if !ok { - return nil + return nil, nil } // This is redundant with earlier calls to // resolve, but because things can change it // must be re-checked. - callee, count := s.resolve(i) + callee, count := s.resolve(state, i) if count <= 0 { - return nil + return nil, nil } if inlCall := inline.TryInlineCall(s.fn, call, s.bigCaller, s.profile, count == 1 && callee.ClosureParent != nil); inlCall != nil { - return inlCall + return call, inlCall } - return nil + return nil, nil } // Mark inserts parentheses, and is called repeatedly. @@ -338,16 +341,18 @@ func (s *inlClosureState) unparenthesize() { // returns. func (s *inlClosureState) fixpoint() bool { changed := false + var state devirtualize.State ir.WithFunc(s.fn, func() { done := false for !done { done = true for i := 0; i < len(s.parens); i++ { // can't use "range parens" here paren := s.parens[i] - if new := s.edit(i); new != nil { + if origCall, inlinedCall := s.edit(&state, i); inlinedCall != nil { // Update AST and recursively mark nodes. - paren.X = new - ir.EditChildren(new, s.mark) // mark may append to parens + paren.X = inlinedCall + ir.EditChildren(inlinedCall, s.mark) // mark may append to parens + state.InlinedCall(s.fn, origCall, inlinedCall) done = false changed = true } diff --git a/src/cmd/compile/internal/ir/bitset.go b/src/cmd/compile/internal/ir/bitset.go index bae40058669..339e4e524f1 100644 --- a/src/cmd/compile/internal/ir/bitset.go +++ b/src/cmd/compile/internal/ir/bitset.go @@ -23,7 +23,7 @@ func (f *bitset8) set2(shift uint8, b uint8) { // Clear old bits. *(*uint8)(f) &^= 3 << shift // Set new bits. - *(*uint8)(f) |= uint8(b&3) << shift + *(*uint8)(f) |= (b & 3) << shift } type bitset16 uint16 diff --git a/src/cmd/compile/internal/ir/dump.go b/src/cmd/compile/internal/ir/dump.go index 4c218682ea6..3e5e6fbdcee 100644 --- a/src/cmd/compile/internal/ir/dump.go +++ b/src/cmd/compile/internal/ir/dump.go @@ -21,7 +21,7 @@ import ( ) // DumpAny is like FDumpAny but prints to stderr. -func DumpAny(root interface{}, filter string, depth int) { +func DumpAny(root any, filter string, depth int) { FDumpAny(os.Stderr, root, filter, depth) } @@ -42,7 +42,7 @@ func DumpAny(root interface{}, filter string, depth int) { // rather than their type; struct fields with zero values or // non-matching field names are omitted, and "…" means recursion // depth has been reached or struct fields have been omitted. 
-func FDumpAny(w io.Writer, root interface{}, filter string, depth int) { +func FDumpAny(w io.Writer, root any, filter string, depth int) { if root == nil { fmt.Fprintln(w, "nil") return @@ -110,7 +110,7 @@ func (p *dumper) Write(data []byte) (n int, err error) { } // printf is a convenience wrapper. -func (p *dumper) printf(format string, args ...interface{}) { +func (p *dumper) printf(format string, args ...any) { if _, err := fmt.Fprintf(p, format, args...); err != nil { panic(err) } diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index d07e522d953..25654ca2536 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -617,7 +617,7 @@ func (o Op) IsSlice3() bool { return false } -// A SliceHeader expression constructs a slice header from its parts. +// A SliceHeaderExpr constructs a slice header from its parts. type SliceHeaderExpr struct { miniExpr Ptr Node @@ -665,7 +665,7 @@ func NewStarExpr(pos src.XPos, x Node) *StarExpr { func (n *StarExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 } func (n *StarExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } -// A TypeAssertionExpr is a selector expression X.(Type). +// A TypeAssertExpr is a selector expression X.(Type). // Before type-checking, the type is Ntype. type TypeAssertExpr struct { miniExpr @@ -677,6 +677,11 @@ type TypeAssertExpr struct { // An internal/abi.TypeAssert descriptor to pass to the runtime. Descriptor *obj.LSym + + // When set to true, if this assert would panic, then use a nil pointer panic + // instead of an interface conversion panic. + // It must not be set for type asserts using the commaok form. + UseNilPanic bool } func NewTypeAssertExpr(pos src.XPos, x Node, typ *types.Type) *TypeAssertExpr { diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index 668537c90e6..e027fe82908 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -90,7 +90,7 @@ type Func struct { Marks []Mark FieldTrack map[*obj.LSym]struct{} - DebugInfo interface{} + DebugInfo any LSym *obj.LSym // Linker object in this function's native ABI (Func.ABI) Inl *Inline diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 6f8d0a7fcc1..01f1c0c5022 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -43,8 +43,8 @@ type Name struct { Func *Func // TODO(austin): nil for I.M Offset_ int64 val constant.Value - Opt interface{} // for use by escape analysis - Embed *[]Embed // list of embedded files, for ONAME var + Opt any // for use by escape analysis + Embed *[]Embed // list of embedded files, for ONAME var // For a local variable (not param) or extern, the initializing assignment (OAS or OAS2). // For a closure var, the ONAME node of the original (outermost) captured variable. 
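The UseNilPanic field added to TypeAssertExpr above exists for the nil-interface case described in the devirtualizer comment earlier in this CL. As a standalone program (types named after that comment's Iface/Impl example):

```
package main

type Iface interface{ A() }
type Impl struct{}

func (*Impl) A() {}

func main() {
	var v Iface
	// v is nil here: with UseNilPanic the devirtualized assertion
	// fails like a nil dereference, rather than with the misleading
	// "interface conversion: Iface is nil, not *Impl".
	v.A()
	v = &Impl{} // the only assignment; the concrete type is *Impl
	_ = v
}
```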
diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go index 14b6b4f3cd4..b805155e6e3 100644 --- a/src/cmd/compile/internal/ir/sizeof_test.go +++ b/src/cmd/compile/internal/ir/sizeof_test.go @@ -16,9 +16,9 @@ func TestSizeof(t *testing.T) { const _64bit = unsafe.Sizeof(uintptr(0)) == 8 var tests = []struct { - val interface{} // type as a value - _32bit uintptr // size on 32bit platforms - _64bit uintptr // size on 64bit platforms + val any // type as a value + _32bit uintptr // size on 32bit platforms + _64bit uintptr // size on 64bit platforms }{ {Func{}, 184, 312}, {Name{}, 96, 160}, diff --git a/src/cmd/compile/internal/ir/symtab.go b/src/cmd/compile/internal/ir/symtab.go index 0cfa2a2262f..344985f7be1 100644 --- a/src/cmd/compile/internal/ir/symtab.go +++ b/src/cmd/compile/internal/ir/symtab.go @@ -13,48 +13,51 @@ import ( var Syms symsStruct type symsStruct struct { - AssertE2I *obj.LSym - AssertE2I2 *obj.LSym - Asanread *obj.LSym - Asanwrite *obj.LSym - CgoCheckMemmove *obj.LSym - CgoCheckPtrWrite *obj.LSym - CheckPtrAlignment *obj.LSym - Deferproc *obj.LSym - Deferprocat *obj.LSym - DeferprocStack *obj.LSym - Deferreturn *obj.LSym - Duffcopy *obj.LSym - Duffzero *obj.LSym - GCWriteBarrier [8]*obj.LSym - Goschedguarded *obj.LSym - Growslice *obj.LSym - InterfaceSwitch *obj.LSym - MallocGC *obj.LSym - Memmove *obj.LSym - Msanread *obj.LSym - Msanwrite *obj.LSym - Msanmove *obj.LSym - Newobject *obj.LSym - Newproc *obj.LSym - PanicBounds *obj.LSym - PanicExtend *obj.LSym - Panicdivide *obj.LSym - Panicshift *obj.LSym - PanicdottypeE *obj.LSym - PanicdottypeI *obj.LSym - Panicnildottype *obj.LSym - Panicoverflow *obj.LSym - PanicSimdImm *obj.LSym - Racefuncenter *obj.LSym - Racefuncexit *obj.LSym - Raceread *obj.LSym - Racereadrange *obj.LSym - Racewrite *obj.LSym - Racewriterange *obj.LSym - TypeAssert *obj.LSym - WBZero *obj.LSym - WBMove *obj.LSym + AssertE2I *obj.LSym + AssertE2I2 *obj.LSym + Asanread *obj.LSym + Asanwrite *obj.LSym + CgoCheckMemmove *obj.LSym + CgoCheckPtrWrite *obj.LSym + CheckPtrAlignment *obj.LSym + Deferproc *obj.LSym + Deferprocat *obj.LSym + DeferprocStack *obj.LSym + Deferreturn *obj.LSym + Duffcopy *obj.LSym + Duffzero *obj.LSym + GCWriteBarrier [8]*obj.LSym + Goschedguarded *obj.LSym + Growslice *obj.LSym + InterfaceSwitch *obj.LSym + MallocGC *obj.LSym + MallocGCSmallNoScan [27]*obj.LSym + MallocGCSmallScanNoHeader [27]*obj.LSym + MallocGCTiny [16]*obj.LSym + Memmove *obj.LSym + Msanread *obj.LSym + Msanwrite *obj.LSym + Msanmove *obj.LSym + Newobject *obj.LSym + Newproc *obj.LSym + PanicBounds *obj.LSym + PanicExtend *obj.LSym + Panicdivide *obj.LSym + Panicshift *obj.LSym + PanicdottypeE *obj.LSym + PanicdottypeI *obj.LSym + Panicnildottype *obj.LSym + Panicoverflow *obj.LSym + PanicSimdImm *obj.LSym + Racefuncenter *obj.LSym + Racefuncexit *obj.LSym + Raceread *obj.LSym + Racereadrange *obj.LSym + Racewrite *obj.LSym + Racewriterange *obj.LSym + TypeAssert *obj.LSym + WBZero *obj.LSym + WBMove *obj.LSym // Wasm SigPanic *obj.LSym Staticuint64s *obj.LSym diff --git a/src/cmd/compile/internal/logopt/log_opts.go b/src/cmd/compile/internal/logopt/log_opts.go index d08f6fb5d6d..c47c9ee5afb 100644 --- a/src/cmd/compile/internal/logopt/log_opts.go +++ b/src/cmd/compile/internal/logopt/log_opts.go @@ -224,12 +224,12 @@ type Diagnostic struct { // A LoggedOpt is what the compiler produces and accumulates, // to be converted to JSON for human or IDE consumption. 
type LoggedOpt struct { - pos src.XPos // Source code position at which the event occurred. If it is inlined, outer and all inlined locations will appear in JSON. - lastPos src.XPos // Usually the same as pos; current exception is for reporting entire range of transformed loops - compilerPass string // Compiler pass. For human/adhoc consumption; does not appear in JSON (yet) - functionName string // Function name. For human/adhoc consumption; does not appear in JSON (yet) - what string // The (non) optimization; "nilcheck", "boundsCheck", "inline", "noInline" - target []interface{} // Optional target(s) or parameter(s) of "what" -- what was inlined, why it was not, size of copy, etc. 1st is most important/relevant. + pos src.XPos // Source code position at which the event occurred. If it is inlined, outer and all inlined locations will appear in JSON. + lastPos src.XPos // Usually the same as pos; current exception is for reporting entire range of transformed loops + compilerPass string // Compiler pass. For human/adhoc consumption; does not appear in JSON (yet) + functionName string // Function name. For human/adhoc consumption; does not appear in JSON (yet) + what string // The (non) optimization; "nilcheck", "boundsCheck", "inline", "noInline" + target []any // Optional target(s) or parameter(s) of "what" -- what was inlined, why it was not, size of copy, etc. 1st is most important/relevant. } type logFormat uint8 @@ -325,7 +325,7 @@ var mu = sync.Mutex{} // mu protects loggedOpts. // Pos is the source position (including inlining), what is the message, pass is which pass created the message, // funcName is the name of the function // A typical use for this to accumulate an explanation for a missed optimization, for example, why did something escape? -func NewLoggedOpt(pos, lastPos src.XPos, what, pass, funcName string, args ...interface{}) *LoggedOpt { +func NewLoggedOpt(pos, lastPos src.XPos, what, pass, funcName string, args ...any) *LoggedOpt { pass = strings.ReplaceAll(pass, " ", "_") return &LoggedOpt{pos, lastPos, pass, funcName, what, args} } @@ -333,7 +333,7 @@ func NewLoggedOpt(pos, lastPos src.XPos, what, pass, funcName string, args ...in // LogOpt logs information about a (usually missed) optimization performed by the compiler. // Pos is the source position (including inlining), what is the message, pass is which pass created the message, // funcName is the name of the function. -func LogOpt(pos src.XPos, what, pass, funcName string, args ...interface{}) { +func LogOpt(pos src.XPos, what, pass, funcName string, args ...any) { if Format == None { return } @@ -346,7 +346,7 @@ func LogOpt(pos src.XPos, what, pass, funcName string, args ...interface{}) { // LogOptRange is the same as LogOpt, but includes the ability to express a range of positions, // not just a point. -func LogOptRange(pos, lastPos src.XPos, what, pass, funcName string, args ...interface{}) { +func LogOptRange(pos, lastPos src.XPos, what, pass, funcName string, args ...any) { if Format == None { return } diff --git a/src/cmd/compile/internal/loong64/ssa.go b/src/cmd/compile/internal/loong64/ssa.go index bd0d96a6954..84bbf9b394d 100644 --- a/src/cmd/compile/internal/loong64/ssa.go +++ b/src/cmd/compile/internal/loong64/ssa.go @@ -692,7 +692,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // vs // 16 instructions in the straightline code // Might as well use straightline code. - v.Fatalf("ZeroLoop size too small %d", n) + v.Fatalf("MoveLoop size too small %d", n) } // Put iteration count in a register. 
@@ -1175,8 +1175,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() p.AddRestSourceArgs([]obj.Addr{ - {Type: obj.TYPE_CONST, Offset: int64((v.AuxInt >> 5) & 0x1fffffffff)}, - {Type: obj.TYPE_CONST, Offset: int64((v.AuxInt >> 0) & 0x1f)}, + {Type: obj.TYPE_CONST, Offset: (v.AuxInt >> 5) & 0x1fffffffff}, + {Type: obj.TYPE_CONST, Offset: (v.AuxInt >> 0) & 0x1f}, }) case ssa.OpLOONG64ADDshiftLLV: diff --git a/src/cmd/compile/internal/loopvar/loopvar.go b/src/cmd/compile/internal/loopvar/loopvar.go index 5a4590d2998..267df2f905c 100644 --- a/src/cmd/compile/internal/loopvar/loopvar.go +++ b/src/cmd/compile/internal/loopvar/loopvar.go @@ -557,7 +557,7 @@ func LogTransformations(transformed []VarAndLoop) { if logopt.Enabled() { // For automated checking of coverage of this transformation, include this in the JSON information. - var nString interface{} = n + var nString any = n if inner != outer { nString = fmt.Sprintf("%v (from inline)", n) } diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go index 41eb2dce1cc..d7dd58d8caa 100644 --- a/src/cmd/compile/internal/noder/reader.go +++ b/src/cmd/compile/internal/noder/reader.go @@ -2961,6 +2961,7 @@ func (r *reader) multiExpr() []ir.Node { as.Def = true for i := range results { tmp := r.temp(pos, r.typ()) + tmp.Defn = as as.PtrInit().Append(ir.NewDecl(pos, ir.ODCL, tmp)) as.Lhs.Append(tmp) @@ -3576,7 +3577,7 @@ func unifiedInlineCall(callerfn *ir.Func, call *ir.CallExpr, fn *ir.Func, inlInd edit(r.curfn) }) - body := ir.Nodes(r.curfn.Body) + body := r.curfn.Body // Reparent any declarations into the caller function. for _, name := range r.curfn.Dcl { diff --git a/src/cmd/compile/internal/noder/writer.go b/src/cmd/compile/internal/noder/writer.go index 9c90d221c28..0b5aa007bf9 100644 --- a/src/cmd/compile/internal/noder/writer.go +++ b/src/cmd/compile/internal/noder/writer.go @@ -120,12 +120,12 @@ func newPkgWriter(m posMap, pkg *types2.Package, info *types2.Info, otherInfo ma } // errorf reports a user error about thing p. -func (pw *pkgWriter) errorf(p poser, msg string, args ...interface{}) { +func (pw *pkgWriter) errorf(p poser, msg string, args ...any) { base.ErrorfAt(pw.m.pos(p), 0, msg, args...) } // fatalf reports an internal compiler error about thing p. -func (pw *pkgWriter) fatalf(p poser, msg string, args ...interface{}) { +func (pw *pkgWriter) fatalf(p poser, msg string, args ...any) { base.FatalfAt(pw.m.pos(p), msg, args...) } diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go index ace3024480e..f0d228559f3 100644 --- a/src/cmd/compile/internal/ppc64/ssa.go +++ b/src/cmd/compile/internal/ppc64/ssa.go @@ -631,7 +631,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()} p.Reg = v.Args[0].Reg() - p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(sh)} + p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: sh} p.AddRestSourceArgs([]obj.Addr{{Type: obj.TYPE_CONST, Offset: mb}, {Type: obj.TYPE_CONST, Offset: me}}) // Auxint holds mask diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go index 38b9391c5f1..324007ea798 100644 --- a/src/cmd/compile/internal/reflectdata/reflect.go +++ b/src/cmd/compile/internal/reflectdata/reflect.go @@ -1282,7 +1282,6 @@ func dgcptrmask(t *types.Type, write bool) *obj.LSym { // word offsets in t that hold pointers. 
// ptrmask is assumed to fit at least types.PtrDataSize(t)/PtrSize bits. func fillptrmask(t *types.Type, ptrmask []byte) { - clear(ptrmask) if !t.HasPointers() { return } diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go index 61de983bb02..9aa77c3d02b 100644 --- a/src/cmd/compile/internal/riscv64/ssa.go +++ b/src/cmd/compile/internal/riscv64/ssa.go @@ -446,6 +446,14 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() + case ssa.OpRISCV64FMOVDconst, ssa.OpRISCV64FMOVFconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_FCONST + p.From.Val = v.AuxFloat() + p.From.Name = obj.NAME_NONE + p.From.Reg = obj.REG_NONE + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() case ssa.OpRISCV64MOVaddr: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_ADDR diff --git a/src/cmd/compile/internal/s390x/ggen.go b/src/cmd/compile/internal/s390x/ggen.go index 70e40312248..c4f88e78262 100644 --- a/src/cmd/compile/internal/s390x/ggen.go +++ b/src/cmd/compile/internal/s390x/ggen.go @@ -11,7 +11,7 @@ import ( "cmd/internal/obj/s390x" ) -// clearLoopCutOff is the (somewhat arbitrary) value above which it is better +// clearLoopCutoff is the (somewhat arbitrary) value above which it is better // to have a loop of clear instructions (e.g. XCs) rather than just generating // multiple instructions (i.e. loop unrolling). // Must be between 256 and 4096. diff --git a/src/cmd/compile/internal/ssa/_gen/386.rules b/src/cmd/compile/internal/ssa/_gen/386.rules index 5f115024192..cbe56f7579e 100644 --- a/src/cmd/compile/internal/ssa/_gen/386.rules +++ b/src/cmd/compile/internal/ssa/_gen/386.rules @@ -7,6 +7,7 @@ (Add(32|64)F ...) => (ADDS(S|D) ...) (Add32carry ...) => (ADDLcarry ...) (Add32withcarry ...) => (ADCL ...) +(Add32carrywithcarry ...) => (ADCLcarry ...) (Sub(Ptr|32|16|8) ...) => (SUBL ...) (Sub(32|64)F ...) => (SUBS(S|D) ...) 
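The Add32carrywithcarry op added above (lowered to ADCLcarry here on 386, and to ADCS on ARM below) is the middle link of a multi-word carry chain: it consumes a carry-in and produces a carry-out, where Add32carry only produces one and Add32withcarry only consumes one. A minimal Go sketch of such a chain using math/bits; the helper name is illustrative, not from this change:

package main

import (
	"fmt"
	"math/bits"
)

// add64via32 chains the carry ops the way dec64.rules does when it splits a
// 64-bit add: Add32carry produces the low word plus a carry-out,
// Add32carrywithcarry would sit in the middle of a longer chain (consuming
// and producing a carry), and Add32withcarry consumes the final carry.
func add64via32(x, y uint64) uint64 {
	x0, x1 := uint32(x), uint32(x>>32)
	y0, y1 := uint32(y), uint32(y>>32)
	lo, c := bits.Add32(x0, y0, 0) // Add32carry: sum and carry-out
	hi, _ := bits.Add32(x1, y1, c) // Add32withcarry: carry-in consumed, carry-out dropped
	return uint64(hi)<<32 | uint64(lo)
}

func main() {
	fmt.Printf("%#x\n", add64via32(0x1_ffff_ffff, 0x2_0000_0001)) // 0x400000000
}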
diff --git a/src/cmd/compile/internal/ssa/_gen/386Ops.go b/src/cmd/compile/internal/ssa/_gen/386Ops.go index 60599a33abb..09bfc4226ff 100644 --- a/src/cmd/compile/internal/ssa/_gen/386Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/386Ops.go @@ -90,22 +90,23 @@ func init() { // Common regInfo var ( - gp01 = regInfo{inputs: nil, outputs: gponly} - gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly} - gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly} - gp11sb = regInfo{inputs: []regMask{gpspsb}, outputs: gponly} - gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly} - gp11carry = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp, 0}} - gp21carry = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp, 0}} - gp1carry1 = regInfo{inputs: []regMask{gp}, outputs: gponly} - gp2carry1 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly} - gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly} - gp21sb = regInfo{inputs: []regMask{gpspsb, gpsp}, outputs: gponly} - gp21shift = regInfo{inputs: []regMask{gp, cx}, outputs: []regMask{gp}} - gp11div = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{ax}, clobbers: dx} - gp21hmul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx}, clobbers: ax} - gp11mod = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{dx}, clobbers: ax} - gp21mul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx, ax}} + gp01 = regInfo{inputs: nil, outputs: gponly} + gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly} + gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly} + gp11sb = regInfo{inputs: []regMask{gpspsb}, outputs: gponly} + gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly} + gp11carry = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp, 0}} + gp21carry = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp, 0}} + gp1carry1 = regInfo{inputs: []regMask{gp}, outputs: gponly} + gp2carry1 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly} + gp2carry1carry = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp, 0}} + gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly} + gp21sb = regInfo{inputs: []regMask{gpspsb, gpsp}, outputs: gponly} + gp21shift = regInfo{inputs: []regMask{gp, cx}, outputs: []regMask{gp}} + gp11div = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{ax}, clobbers: dx} + gp21hmul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx}, clobbers: ax} + gp11mod = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{dx}, clobbers: ax} + gp21mul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx, ax}} gp2flags = regInfo{inputs: []regMask{gpsp, gpsp}} gp1flags = regInfo{inputs: []regMask{gpsp}} @@ -181,10 +182,11 @@ func init() { {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true}, // arg0 + arg1 {name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32", typ: "UInt32", clobberFlags: true}, // arg0 + auxint - {name: "ADDLcarry", argLength: 2, reg: gp21carry, asm: "ADDL", commutative: true, resultInArg0: true}, // arg0 + arg1, generates pair - {name: "ADDLconstcarry", argLength: 1, reg: gp11carry, asm: "ADDL", aux: "Int32", resultInArg0: true}, // arg0 + auxint, generates pair - {name: "ADCL", argLength: 3, reg: gp2carry1, asm: "ADCL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0+arg1+carry(arg2), where arg2 is flags - {name: "ADCLconst", argLength: 2, reg: gp1carry1, asm: "ADCL", aux: "Int32", resultInArg0: true, clobberFlags: true}, 
// arg0+auxint+carry(arg1), where arg1 is flags + {name: "ADDLcarry", argLength: 2, reg: gp21carry, asm: "ADDL", commutative: true, resultInArg0: true}, // arg0 + arg1, generates pair + {name: "ADDLconstcarry", argLength: 1, reg: gp11carry, asm: "ADDL", aux: "Int32", resultInArg0: true}, // arg0 + auxint, generates pair + {name: "ADCL", argLength: 3, reg: gp2carry1, asm: "ADCL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0+arg1+carry(arg2), where arg2 is flags + {name: "ADCLcarry", argLength: 3, reg: gp2carry1carry, asm: "ADCL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0+arg1+carry(arg2), where arg2 is flags, generates pair + {name: "ADCLconst", argLength: 2, reg: gp1carry1, asm: "ADCL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0+auxint+carry(arg1), where arg1 is flags {name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true, clobberFlags: true}, // arg0 - arg1 {name: "SUBLconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index 6191a7954af..38ca44f7eb0 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -162,10 +162,19 @@ (Cvt64to32F ...) => (CVTSQ2SS ...) (Cvt64to64F ...) => (CVTSQ2SD ...) -(Cvt32Fto32 ...) => (CVTTSS2SL ...) -(Cvt32Fto64 ...) => (CVTTSS2SQ ...) -(Cvt64Fto32 ...) => (CVTTSD2SL ...) -(Cvt64Fto64 ...) => (CVTTSD2SQ ...) +// Float, to int. +// To make AMD64 "overflow" return max positive instead of max negative, compute +// y and not x, smear the sign bit, and xor. +(Cvt32Fto32 x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORL y (SARLconst [31] (ANDL y:(CVTTSS2SL x) (NOTL (MOVLf2i x))))) +(Cvt64Fto32 x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORL y (SARLconst [31] (ANDL y:(CVTTSD2SL x) (NOTL (MOVLf2i (CVTSD2SS x)))))) + +(Cvt32Fto64 x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORQ y (SARQconst [63] (ANDQ y:(CVTTSS2SQ x) (NOTQ (MOVQf2i (CVTSS2SD x))) ))) +(Cvt64Fto64 x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORQ y (SARQconst [63] (ANDQ y:(CVTTSD2SQ x) (NOTQ (MOVQf2i x))))) + +(Cvt32Fto32 x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSS2SL x) +(Cvt32Fto64 x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSS2SQ x) +(Cvt64Fto32 x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSD2SL x) +(Cvt64Fto64 x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSD2SQ x) (Cvt32Fto64F ...) => (CVTSS2SD ...) (Cvt64Fto32F ...) => (CVTSD2SS ...) @@ -388,20 +397,30 @@ (CondSelect x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && is16BitInt(t) => (CMOVW(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond) -// If the condition does not set the flags, we need to generate a comparison. 
-(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 1 - => (CondSelect x y (MOVBQZX check)) -(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 2 - => (CondSelect x y (MOVWQZX check)) -(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 4 - => (CondSelect x y (MOVLQZX check)) - (CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t)) => (CMOVQNE y x (CMPQconst [0] check)) (CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t) => (CMOVLNE y x (CMPQconst [0] check)) (CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t) => (CMOVWNE y x (CMPQconst [0] check)) +(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 4 && (is64BitInt(t) || isPtr(t)) + => (CMOVQNE y x (CMPLconst [0] check)) +(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 4 && is32BitInt(t) + => (CMOVLNE y x (CMPLconst [0] check)) +(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 4 && is16BitInt(t) + => (CMOVWNE y x (CMPLconst [0] check)) +(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 2 && (is64BitInt(t) || isPtr(t)) + => (CMOVQNE y x (CMPWconst [0] check)) +(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 2 && is32BitInt(t) + => (CMOVLNE y x (CMPWconst [0] check)) +(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 2 && is16BitInt(t) + => (CMOVWNE y x (CMPWconst [0] check)) +(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 1 && (is64BitInt(t) || isPtr(t)) + => (CMOVQNE y x (CMPBconst [0] check)) +(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 1 && is32BitInt(t) + => (CMOVLNE y x (CMPBconst [0] check)) +(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 1 && is16BitInt(t) + => (CMOVWNE y x (CMPBconst [0] check)) // Absorb InvertFlags (CMOVQ(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond)) diff --git a/src/cmd/compile/internal/ssa/_gen/ARM.rules b/src/cmd/compile/internal/ssa/_gen/ARM.rules index 18b5d6bba60..b63ca23de14 100644 --- a/src/cmd/compile/internal/ssa/_gen/ARM.rules +++ b/src/cmd/compile/internal/ssa/_gen/ARM.rules @@ -6,6 +6,7 @@ (Add(32|64)F ...) => (ADD(F|D) ...) (Add32carry ...) => (ADDS ...) (Add32withcarry ...) => (ADC ...) +(Add32carrywithcarry ...) => (ADCS ...) (Sub(Ptr|32|16|8) ...) => (SUB ...) (Sub(32|64)F ...) => (SUB(F|D) ...) 
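The AMD64 Cvt32Fto32/Cvt64Fto64 rewrites above (gated by base.ConvertHash) make float-to-int overflow saturate toward the sign of the input: the x86 CVTT instructions return the "integer indefinite" value 0x80000000 on overflow or NaN, and the XOR/SAR/AND/NOT sequence flips that to 0x7fffffff exactly when the source's sign bit is clear. A Go sketch of the bit trick; cvttss2sl below only simulates the hardware instruction and is not part of the patch:

package main

import (
	"fmt"
	"math"
)

// cvttss2sl models x86 CVTTSS2SL: truncate toward zero, producing the
// "integer indefinite" value 0x80000000 for NaN and out-of-range inputs.
func cvttss2sl(x float32) int32 {
	if x != x || x >= 1<<31 || x < -(1<<31) {
		return math.MinInt32
	}
	return int32(x)
}

// cvt32Fto32 follows the rewrite
//   (Cvt32Fto32 x) => (XORL y (SARLconst [31] (ANDL y:(CVTTSS2SL x) (NOTL (MOVLf2i x)))))
// When y is the indefinite value and x's sign bit is clear, y &^ fb is
// negative, the arithmetic shift smears that sign bit into an all-ones mask,
// and the final XOR flips 0x80000000 to 0x7fffffff. In every other case the
// mask is zero and y passes through unchanged.
func cvt32Fto32(x float32) int32 {
	y := cvttss2sl(x)
	fb := int32(math.Float32bits(x)) // MOVLf2i: raw float bits as an integer
	return y ^ ((y &^ fb) >> 31)     // NOTL+ANDL, SARLconst [31], XORL
}

func main() {
	fmt.Println(cvt32Fto32(1e10), cvt32Fto32(-1e10), cvt32Fto32(123.9))
	// 2147483647 -2147483648 123
}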
diff --git a/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go index 43072ae9130..cc3758d1095 100644 --- a/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go @@ -144,8 +144,9 @@ func init() { gpspsbg = gpspg | buildReg("SB") fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31") callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g + r25 = buildReg("R25") r24to25 = buildReg("R24 R25") - r23to25 = buildReg("R23 R24 R25") + f16to17 = buildReg("F16 F17") rz = buildReg("ZERO") first16 = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15") ) @@ -599,8 +600,8 @@ func init() { aux: "Int64", argLength: 3, reg: regInfo{ - inputs: []regMask{gp &^ r24to25, gp &^ r24to25}, - clobbers: r24to25, // TODO: figure out needIntTemp x2 + inputs: []regMask{gp &^ r25, gp &^ r25}, + clobbers: r25 | f16to17, // TODO: figure out needIntTemp + x2 for floats }, faultOnNilArg0: true, faultOnNilArg1: true, @@ -617,8 +618,8 @@ func init() { aux: "Int64", argLength: 3, reg: regInfo{ - inputs: []regMask{gp &^ r23to25, gp &^ r23to25}, - clobbers: r23to25, // TODO: figure out needIntTemp x3 + inputs: []regMask{gp &^ r24to25, gp &^ r24to25}, + clobbers: r24to25 | f16to17, // TODO: figure out needIntTemp x2 + x2 for floats clobbersArg0: true, clobbersArg1: true, }, diff --git a/src/cmd/compile/internal/ssa/_gen/ARMOps.go b/src/cmd/compile/internal/ssa/_gen/ARMOps.go index 01cd48835e2..59bb71b2e3c 100644 --- a/src/cmd/compile/internal/ssa/_gen/ARMOps.go +++ b/src/cmd/compile/internal/ssa/_gen/ARMOps.go @@ -102,36 +102,37 @@ func init() { ) // Common regInfo var ( - gp01 = regInfo{inputs: nil, outputs: []regMask{gp}} - gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}} - gp11carry = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp, 0}} - gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}} - gp1flags = regInfo{inputs: []regMask{gpg}} - gp1flags1 = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}} - gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}} - gp21carry = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp, 0}} - gp2flags = regInfo{inputs: []regMask{gpg, gpg}} - gp2flags1 = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}} - gp22 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp, gp}} - gp31 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}} - gp31carry = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp, 0}} - gp3flags = regInfo{inputs: []regMask{gp, gp, gp}} - gp3flags1 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}} - gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}} - gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}} - gp2load = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}} - gp2store = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}} - fp01 = regInfo{inputs: nil, outputs: []regMask{fp}} - fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}} - fp1flags = regInfo{inputs: []regMask{fp}} - fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}, clobbers: buildReg("F15")} // int-float conversion uses F15 as tmp - gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}, clobbers: buildReg("F15")} - fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}} - fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: 
[]regMask{fp}} - fp2flags = regInfo{inputs: []regMask{fp, fp}} - fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}} - fpstore = regInfo{inputs: []regMask{gpspsbg, fp}} - readflags = regInfo{inputs: nil, outputs: []regMask{gp}} + gp01 = regInfo{inputs: nil, outputs: []regMask{gp}} + gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}} + gp11carry = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp, 0}} + gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}} + gp1flags = regInfo{inputs: []regMask{gpg}} + gp1flags1 = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}} + gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}} + gp21carry = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp, 0}} + gp2flags = regInfo{inputs: []regMask{gpg, gpg}} + gp2flags1 = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}} + gp2flags1carry = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp, 0}} + gp22 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp, gp}} + gp31 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}} + gp31carry = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp, 0}} + gp3flags = regInfo{inputs: []regMask{gp, gp, gp}} + gp3flags1 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}} + gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}} + gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}} + gp2load = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}} + gp2store = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}} + fp01 = regInfo{inputs: nil, outputs: []regMask{fp}} + fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}} + fp1flags = regInfo{inputs: []regMask{fp}} + fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}, clobbers: buildReg("F15")} // int-float conversion uses F15 as tmp + gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}, clobbers: buildReg("F15")} + fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}} + fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: []regMask{fp}} + fp2flags = regInfo{inputs: []regMask{fp, fp}} + fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}} + fpstore = regInfo{inputs: []regMask{gpspsbg, fp}} + readflags = regInfo{inputs: nil, outputs: []regMask{gp}} ) ops := []opData{ // binary ops @@ -161,16 +162,17 @@ func init() { call: false, // TODO(mdempsky): Should this be true? 
	},

-	{name: "ADDS", argLength: 2, reg: gp21carry, asm: "ADD", commutative: true}, // arg0 + arg1, set carry flag
-	{name: "ADDSconst", argLength: 1, reg: gp11carry, asm: "ADD", aux: "Int32"}, // arg0 + auxInt, set carry flag
-	{name: "ADC", argLength: 3, reg: gp2flags1, asm: "ADC", commutative: true}, // arg0 + arg1 + carry, arg2=flags
-	{name: "ADCconst", argLength: 2, reg: gp1flags1, asm: "ADC", aux: "Int32"}, // arg0 + auxInt + carry, arg1=flags
-	{name: "SUBS", argLength: 2, reg: gp21carry, asm: "SUB"}, // arg0 - arg1, set carry flag
-	{name: "SUBSconst", argLength: 1, reg: gp11carry, asm: "SUB", aux: "Int32"}, // arg0 - auxInt, set carry flag
-	{name: "RSBSconst", argLength: 1, reg: gp11carry, asm: "RSB", aux: "Int32"}, // auxInt - arg0, set carry flag
-	{name: "SBC", argLength: 3, reg: gp2flags1, asm: "SBC"}, // arg0 - arg1 - carry, arg2=flags
-	{name: "SBCconst", argLength: 2, reg: gp1flags1, asm: "SBC", aux: "Int32"}, // arg0 - auxInt - carry, arg1=flags
-	{name: "RSCconst", argLength: 2, reg: gp1flags1, asm: "RSC", aux: "Int32"}, // auxInt - arg0 - carry, arg1=flags
+	{name: "ADDS", argLength: 2, reg: gp21carry, asm: "ADD", commutative: true}, // arg0 + arg1, set carry flag
+	{name: "ADDSconst", argLength: 1, reg: gp11carry, asm: "ADD", aux: "Int32"}, // arg0 + auxInt, set carry flag
+	{name: "ADC", argLength: 3, reg: gp2flags1, asm: "ADC", commutative: true}, // arg0 + arg1 + carry, arg2=flags
+	{name: "ADCconst", argLength: 2, reg: gp1flags1, asm: "ADC", aux: "Int32"}, // arg0 + auxInt + carry, arg1=flags
+	{name: "ADCS", argLength: 3, reg: gp2flags1carry, asm: "ADC", commutative: true}, // arg0 + arg1 + carry, sets carry
+	{name: "SUBS", argLength: 2, reg: gp21carry, asm: "SUB"}, // arg0 - arg1, set carry flag
+	{name: "SUBSconst", argLength: 1, reg: gp11carry, asm: "SUB", aux: "Int32"}, // arg0 - auxInt, set carry flag
+	{name: "RSBSconst", argLength: 1, reg: gp11carry, asm: "RSB", aux: "Int32"}, // auxInt - arg0, set carry flag
+	{name: "SBC", argLength: 3, reg: gp2flags1, asm: "SBC"}, // arg0 - arg1 - carry, arg2=flags
+	{name: "SBCconst", argLength: 2, reg: gp1flags1, asm: "SBC", aux: "Int32"}, // arg0 - auxInt - carry, arg1=flags
+	{name: "RSCconst", argLength: 2, reg: gp1flags1, asm: "RSC", aux: "Int32"}, // auxInt - arg0 - carry, arg1=flags

	{name: "MULLU", argLength: 2, reg: gp22, asm: "MULLU", commutative: true}, // arg0 * arg1, high 32 bits in out0, low 32 bits in out1
	{name: "MULA", argLength: 3, reg: gp31, asm: "MULA"}, // arg0 * arg1 + arg2
diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64.rules b/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
index 287eedee374..9691296043a 100644
--- a/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
@@ -611,15 +611,24 @@
 (MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)

 // register indexed load
-(MOVVload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVVloadidx ptr idx mem)
-(MOVWUload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx ptr idx mem)
-(MOVWload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx ptr idx mem)
-(MOVHUload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx ptr idx mem)
-(MOVHload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx ptr idx mem)
-(MOVBUload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVBUloadidx ptr idx mem)
-(MOVBload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym ==
nil => (MOVBloadidx ptr idx mem) -(MOVFload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVFloadidx ptr idx mem) -(MOVDload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx ptr idx mem) +(MOVVload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVVloadidx ptr idx mem) +(MOVVload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVVloadidx ptr (SLLVconst [shift] idx) mem) +(MOVWUload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx ptr idx mem) +(MOVWUload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx ptr (SLLVconst [shift] idx) mem) +(MOVWload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx ptr idx mem) +(MOVWload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx ptr (SLLVconst [shift] idx) mem) +(MOVHUload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx ptr idx mem) +(MOVHUload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx ptr (SLLVconst [shift] idx) mem) +(MOVHload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx ptr idx mem) +(MOVHload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx ptr (SLLVconst [shift] idx) mem) +(MOVBUload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVBUloadidx ptr idx mem) +(MOVBUload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVBUloadidx ptr (SLLVconst [shift] idx) mem) +(MOVBload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVBloadidx ptr idx mem) +(MOVBload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVBloadidx ptr (SLLVconst [shift] idx) mem) +(MOVFload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVFloadidx ptr idx mem) +(MOVFload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVFloadidx ptr (SLLVconst [shift] idx) mem) +(MOVDload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx ptr idx mem) +(MOVDload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx ptr (SLLVconst [shift] idx) mem) (MOVVloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVVload [int32(c)] ptr mem) (MOVVloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVVload [int32(c)] ptr mem) (MOVWUloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem) @@ -640,12 +649,18 @@ (MOVDloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVDload [int32(c)] ptr mem) // register indexed store -(MOVVstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVVstoreidx ptr idx val mem) -(MOVWstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx ptr idx val mem) -(MOVHstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx ptr idx val mem) -(MOVBstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVBstoreidx ptr idx val mem) -(MOVFstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVFstoreidx ptr idx val mem) -(MOVDstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx ptr idx val mem) +(MOVVstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVVstoreidx ptr idx val mem) +(MOVVstore [off] {sym} (ADDshiftLLV [shift] ptr idx) val mem) && off == 0 && sym == nil 
=> (MOVVstoreidx ptr (SLLVconst [shift] idx) val mem)
+(MOVWstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx ptr idx val mem)
+(MOVWstore [off] {sym} (ADDshiftLLV [shift] ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx ptr (SLLVconst [shift] idx) val mem)
+(MOVHstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx ptr idx val mem)
+(MOVHstore [off] {sym} (ADDshiftLLV [shift] ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx ptr (SLLVconst [shift] idx) val mem)
+(MOVBstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVBstoreidx ptr idx val mem)
+(MOVBstore [off] {sym} (ADDshiftLLV [shift] ptr idx) val mem) && off == 0 && sym == nil => (MOVBstoreidx ptr (SLLVconst [shift] idx) val mem)
+(MOVFstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVFstoreidx ptr idx val mem)
+(MOVFstore [off] {sym} (ADDshiftLLV [shift] ptr idx) val mem) && off == 0 && sym == nil => (MOVFstoreidx ptr (SLLVconst [shift] idx) val mem)
+(MOVDstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx ptr idx val mem)
+(MOVDstore [off] {sym} (ADDshiftLLV [shift] ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx ptr (SLLVconst [shift] idx) val mem)
 (MOVVstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVVstore [int32(c)] ptr val mem)
 (MOVVstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVVstore [int32(c)] idx val mem)
 (MOVWstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVWstore [int32(c)] ptr val mem)
diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
index a85a566660e..7e8b8bf497b 100644
--- a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
@@ -143,6 +143,7 @@ func init() {
 		gp2load   = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
 		gpstore   = regInfo{inputs: []regMask{gpspsbg, gpg}}
 		gpstore2  = regInfo{inputs: []regMask{gpspsbg, gpg, gpg | rz}}
+		gpoldatom = regInfo{inputs: []regMask{gpspsbg, gpg}}
 		gpxchg    = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
 		gpcas     = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}}
 		preldreg  = regInfo{inputs: []regMask{gpspg}}
@@ -431,6 +432,12 @@
 			faultOnNilArg1: true,
 		},

+		// Atomic operations.
+		//
+		// resultNotInArgs is needed by all ops lowering to LoongArch
+		// atomic memory access instructions, because these instructions
+		// are defined to require rd != rj && rd != rk per the ISA spec.
+
 		// atomic loads.
 		// load from arg0. arg1=mem.
 		// returns <value,memory> so they can be properly ordered with other loads.
@@ -500,8 +507,8 @@
 		// Atomic 32 bit AND/OR.
 		// *arg0 &= (|=) arg1. arg2=mem. returns nil.
-		{name: "LoweredAtomicAnd32", argLength: 3, reg: gpxchg, asm: "AMANDDBW", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
-		{name: "LoweredAtomicOr32", argLength: 3, reg: gpxchg, asm: "AMORDBW", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+		{name: "LoweredAtomicAnd32", argLength: 3, reg: gpoldatom, asm: "AMANDDBW", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+		{name: "LoweredAtomicOr32", argLength: 3, reg: gpoldatom, asm: "AMORDBW", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},

 		// Atomic 32,64 bit AND/OR.
 		// *arg0 &= (|=) arg1. arg2=mem. returns <new memory>. auxint must be zero.
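The new LOONG64 rules above fold an ADDshiftLLV address computation into the register-indexed load/store forms by materializing the scale as an explicit SLLVconst on the index. The source pattern being targeted is, roughly, ordinary scaled slice indexing, where the element address is base + (i << log2(elemsize)); an illustrative (assumed, not taken from the patch) Go equivalent:

package main

import "fmt"

// sum64 indexes a []int64, so each element load computes the address
// &s[0] + (i << 3). On loong64 that shift-plus-add could previously become
// an ADDshiftLLV feeding a plain MOVVload; with the rules above it can use
// MOVVloadidx with the shift materialized as an SLLVconst instead.
func sum64(s []int64) int64 {
	var t int64
	for i := 0; i < len(s); i++ {
		t += s[i]
	}
	return t
}

func main() {
	fmt.Println(sum64([]int64{1, 2, 3, 4})) // 10
}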
diff --git a/src/cmd/compile/internal/ssa/_gen/MIPS.rules b/src/cmd/compile/internal/ssa/_gen/MIPS.rules index 80bf9017f52..fe1e00a4e4c 100644 --- a/src/cmd/compile/internal/ssa/_gen/MIPS.rules +++ b/src/cmd/compile/internal/ssa/_gen/MIPS.rules @@ -9,6 +9,12 @@ (Select1 (Add32carry x y)) => (SGTU x (ADD x y)) (Add32withcarry x y c) => (ADD c (ADD x y)) +(Select0 (Add32carrywithcarry x y c)) => (ADD c (ADD x y)) +(Select1 (Add32carrywithcarry x y c)) => + (OR + (SGTU x xy:(ADD x y)) + (SGTU xy (ADD c xy))) + (Sub(Ptr|32|16|8) ...) => (SUB ...) (Sub(32|64)F ...) => (SUB(F|D) ...) diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64.rules b/src/cmd/compile/internal/ssa/_gen/PPC64.rules index f5e381ac413..b5e8d81da2d 100644 --- a/src/cmd/compile/internal/ssa/_gen/PPC64.rules +++ b/src/cmd/compile/internal/ssa/_gen/PPC64.rules @@ -18,7 +18,10 @@ (Max(32|64)F x y) && buildcfg.GOPPC64 >= 9 => (XSMAXJDP x y) // Combine 64 bit integer multiply and adds -(ADD l:(MULLD x y) z) && buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD x y z) +(ADD z l:(MULLD x y)) && buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD x y z ) +(ADD z l:(MULLDconst [x] y)) && buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD (MOVDconst [int64(x)]) y z ) +(ADDconst [z] l:(MULLD x y)) && buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD x y (MOVDconst [int64(z)])) +(ADDconst [z] l:(MULLDconst [x] y)) && buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD (MOVDconst [int64(x)]) y (MOVDconst [int64(z)])) (Mod16 x y) => (Mod32 (SignExt16to32 x) (SignExt16to32 y)) (Mod16u x y) => (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y)) diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules index 7059273eb2f..646948f2df2 100644 --- a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules +++ b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules @@ -467,8 +467,7 @@ (OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr) (Const(64|32|16|8) [val]) => (MOVDconst [int64(val)]) -(Const32F [val]) => (FMVSX (MOVDconst [int64(math.Float32bits(val))])) -(Const64F [val]) => (FMVDX (MOVDconst [int64(math.Float64bits(val))])) +(Const(64|32)F ...) => (FMOV(D|F)const ...) (ConstNil) => (MOVDconst [0]) (ConstBool [val]) => (MOVDconst [int64(b2i(val))]) @@ -824,16 +823,28 @@ (F(MADD|NMADD|MSUB|NMSUB)D x y neg:(FNEGD z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)D x y z) // Test for -∞ (bit 0) using 64 bit classify instruction. -(FLTD x (FMVDX (MOVDconst [int64(math.Float64bits(-math.MaxFloat64))]))) => (ANDI [1] (FCLASSD x)) -(FLED (FMVDX (MOVDconst [int64(math.Float64bits(-math.MaxFloat64))])) x) => (SNEZ (ANDI [0xff &^ 1] (FCLASSD x))) -(FEQD x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(-1)))]))) => (ANDI [1] (FCLASSD x)) -(FNED x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(-1)))]))) => (SEQZ (ANDI [1] (FCLASSD x))) +(FLTD x (FMOVDconst [-math.MaxFloat64])) => (ANDI [0b00_0000_0001] (FCLASSD x)) +(FLED (FMOVDconst [-math.MaxFloat64]) x) => (SNEZ (ANDI [0b00_1111_1110] (FCLASSD x))) +(FEQD x (FMOVDconst [math.Inf(-1)])) => (ANDI [0b00_0000_0001] (FCLASSD x)) +(FNED x (FMOVDconst [math.Inf(-1)])) => (SEQZ (ANDI [0b00_0000_0001] (FCLASSD x))) // Test for +∞ (bit 7) using 64 bit classify instruction. 
-(FLTD (FMVDX (MOVDconst [int64(math.Float64bits(math.MaxFloat64))])) x) => (SNEZ (ANDI [1<<7] (FCLASSD x))) -(FLED x (FMVDX (MOVDconst [int64(math.Float64bits(math.MaxFloat64))]))) => (SNEZ (ANDI [0xff &^ (1<<7)] (FCLASSD x))) -(FEQD x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(1)))]))) => (SNEZ (ANDI [1<<7] (FCLASSD x))) -(FNED x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(1)))]))) => (SEQZ (ANDI [1<<7] (FCLASSD x))) +(FLTD (FMOVDconst [math.MaxFloat64]) x) => (SNEZ (ANDI [0b00_1000_0000] (FCLASSD x))) +(FLED x (FMOVDconst [math.MaxFloat64])) => (SNEZ (ANDI [0b00_0111_1111] (FCLASSD x))) +(FEQD x (FMOVDconst [math.Inf(1)])) => (SNEZ (ANDI [0b00_1000_0000] (FCLASSD x))) +(FNED x (FMOVDconst [math.Inf(1)])) => (SEQZ (ANDI [0b00_1000_0000] (FCLASSD x))) + +// Test for subnormal numbers using 64 bit classify instruction. +(FLTD x (FMOVDconst [+0x1p-1022])) => (SNEZ (ANDI [0b00_0011_1111] (FCLASSD x))) +(FLED (FMOVDconst [+0x1p-1022]) x) => (SNEZ (ANDI [0b00_1100_0000] (FCLASSD x))) +(FLED x (FMOVDconst [-0x1p-1022])) => (SNEZ (ANDI [0b00_0000_0011] (FCLASSD x))) +(FLTD (FMOVDconst [-0x1p-1022]) x) => (SNEZ (ANDI [0b00_1111_1100] (FCLASSD x))) + +// Absorb unary sign bit operations into 64 bit classify instruction. +(S(EQ|NE)Z (ANDI [c] (FCLASSD (FNEGD x)))) => (S(EQ|NE)Z (ANDI [(c&0b11_0000_0000)|int64(bits.Reverse8(uint8(c))&0b1111_1111)] (FCLASSD x))) +(S(EQ|NE)Z (ANDI [c] (FCLASSD (FABSD x)))) => (S(EQ|NE)Z (ANDI [(c&0b11_1111_0000)|int64(bits.Reverse8(uint8(c))&0b0000_1111)] (FCLASSD x))) +(B(EQ|NE)Z (ANDI [c] (FCLASSD (FNEGD x))) yes no) => (B(EQ|NE)Z (ANDI [(c&0b11_0000_0000)|int64(bits.Reverse8(uint8(c))&0b1111_1111)] (FCLASSD x)) yes no) +(B(EQ|NE)Z (ANDI [c] (FCLASSD (FABSD x))) yes no) => (B(EQ|NE)Z (ANDI [(c&0b11_1111_0000)|int64(bits.Reverse8(uint8(c))&0b0000_1111)] (FCLASSD x)) yes no) // // Optimisations for rva22u64 and above. diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go b/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go index dc433ff9749..a0e1ab9754d 100644 --- a/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go @@ -99,7 +99,7 @@ func init() { } } - // Floating pointer registers. + // Floating point registers. 
for r := 32; r <= 63; r++ { mask := addreg(r, "") fpMask |= mask @@ -132,6 +132,7 @@ func init() { gpcas = regInfo{inputs: []regMask{gpspsbgMask, gpgMask, gpgMask}, outputs: []regMask{gpMask}} gpatomic = regInfo{inputs: []regMask{gpspsbgMask, gpgMask}} + fp01 = regInfo{outputs: []regMask{fpMask}} fp11 = regInfo{inputs: []regMask{fpMask}, outputs: []regMask{fpMask}} fp21 = regInfo{inputs: []regMask{fpMask, fpMask}, outputs: []regMask{fpMask}} fp31 = regInfo{inputs: []regMask{fpMask, fpMask, fpMask}, outputs: []regMask{fpMask}} @@ -176,7 +177,9 @@ func init() { {name: "MOVaddr", argLength: 1, reg: gp11sb, asm: "MOV", aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux // auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address - {name: "MOVDconst", reg: gp01, asm: "MOV", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint + {name: "MOVDconst", reg: gp01, asm: "MOV", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint + {name: "FMOVDconst", reg: fp01, asm: "MOVD", typ: "Float64", aux: "Float64", rematerializeable: true}, // auxint + {name: "FMOVFconst", reg: fp01, asm: "MOVF", typ: "Float32", aux: "Float32", rematerializeable: true}, // auxint // Loads: load bits from arg0+auxint+aux and extend to 64 bits; arg1=mem {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVB", aux: "SymOff", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // 8 bits, sign extend diff --git a/src/cmd/compile/internal/ssa/_gen/Wasm.rules b/src/cmd/compile/internal/ssa/_gen/Wasm.rules index f632a01109f..60281522539 100644 --- a/src/cmd/compile/internal/ssa/_gen/Wasm.rules +++ b/src/cmd/compile/internal/ssa/_gen/Wasm.rules @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +(Last ___) => v.Args[len(v.Args)-1] + // Lowering arithmetic (Add(64|32|16|8|Ptr) ...) => (I64Add ...) (Add(64|32)F ...) => (F(64|32)Add ...) @@ -44,6 +46,37 @@ (Not ...) => (I64Eqz ...) +(Avg64u x y) => (I64Add (I64ShrU (I64Sub x y) (I64Const [1])) y) + +// High word of multiply without carry bits; see Hacker's Delight, 2nd. ed, Figure 8-2, p. 174. +(Hmul64 x y) => + (Last + x0: (ZeroExt32to64 x) + x1: (I64ShrS x (I64Const [32])) + y0: (ZeroExt32to64 y) + y1: (I64ShrS y (I64Const [32])) + x0y0: (I64Mul x0 y0) + tt: (I64Add (I64Mul x1 y0) (I64ShrU x0y0 (I64Const [32]))) + w1: (I64Add (I64Mul x0 y1) (ZeroExt32to64 tt)) + w2: (I64ShrS tt (I64Const [32])) + (I64Add (I64Add (I64Mul x1 y1) w2) (I64ShrS w1 (I64Const [32])))) + +// Same as Hmul64 but signed shifts now unsigned. +(Hmul64u x y) => + (Last + x0: (ZeroExt32to64 x) + x1: (I64ShrU x (I64Const [32])) + y0: (ZeroExt32to64 y) + y1: (I64ShrU y (I64Const [32])) + w0: (I64Mul x0 y0) + tt: (I64Add (I64Mul x1 y0) (I64ShrU w0 (I64Const [32]))) + w1: (I64Add (I64Mul x0 y1) (ZeroExt32to64 tt)) + w2: (I64ShrU tt (I64Const [32])) + hi: (I64Add (I64Add (I64Mul x1 y1) w2) (I64ShrU w1 (I64Const [32])))) + +(Select0 (Mul64uhilo x y)) => (Hmul64u x y) +(Select1 (Mul64uhilo x y)) => (I64Mul x y) + // Lowering pointer arithmetic (OffPtr ...) => (I64AddConst ...) 
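The Wasm Hmul64u lowering above is the classic 32×32-bit decomposition from Hacker's Delight Figure 8-2: three partial products with the carries threaded through the tt, w1, and w2 temporaries. A direct Go transcription of the rule, checked against math/bits.Mul64 (the function name is illustrative; the variable names mirror the rule's bindings):

package main

import (
	"fmt"
	"math/bits"
)

// hmul64u returns the high 64 bits of x*y, mirroring the (Hmul64u ...) rule:
// x0/y0 are the zero-extended low halves, x1/y1 the high halves, and
// w0, tt, w1, w2 are the same intermediate sums the rule names.
func hmul64u(x, y uint64) uint64 {
	x0, x1 := x&0xffffffff, x>>32
	y0, y1 := y&0xffffffff, y>>32
	w0 := x0 * y0
	tt := x1*y0 + w0>>32
	w1 := x0*y1 + tt&0xffffffff
	w2 := tt >> 32
	return x1*y1 + w2 + w1>>32
}

func main() {
	x, y := uint64(0xdeadbeefcafebabe), uint64(0x0123456789abcdef)
	hi, _ := bits.Mul64(x, y)
	fmt.Println(hmul64u(x, y) == hi) // true
}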
diff --git a/src/cmd/compile/internal/ssa/_gen/allocators.go b/src/cmd/compile/internal/ssa/_gen/allocators.go index 38acc5133ab..246fe98a21d 100644 --- a/src/cmd/compile/internal/ssa/_gen/allocators.go +++ b/src/cmd/compile/internal/ssa/_gen/allocators.go @@ -122,6 +122,11 @@ func genAllocators() { typ: "[]ID", base: "LimitSlice", }, + { + name: "UintSlice", + typ: "[]uint", + base: "LimitSlice", + }, } w := new(bytes.Buffer) diff --git a/src/cmd/compile/internal/ssa/_gen/dec.rules b/src/cmd/compile/internal/ssa/_gen/dec.rules index 5309a7f6b49..9f6dc369759 100644 --- a/src/cmd/compile/internal/ssa/_gen/dec.rules +++ b/src/cmd/compile/internal/ssa/_gen/dec.rules @@ -4,7 +4,7 @@ // This file contains rules to decompose builtin compound types // (complex,string,slice,interface) into their constituent -// types. These rules work together with the decomposeBuiltIn +// types. These rules work together with the decomposeBuiltin // pass which handles phis of these types. (Store {t} _ _ mem) && t.Size() == 0 => mem diff --git a/src/cmd/compile/internal/ssa/_gen/dec64.rules b/src/cmd/compile/internal/ssa/_gen/dec64.rules index ba776af1a70..483818906e6 100644 --- a/src/cmd/compile/internal/ssa/_gen/dec64.rules +++ b/src/cmd/compile/internal/ssa/_gen/dec64.rules @@ -3,11 +3,15 @@ // license that can be found in the LICENSE file. // This file contains rules to decompose [u]int64 types on 32-bit -// architectures. These rules work together with the decomposeBuiltIn +// architectures. These rules work together with the decomposeBuiltin // pass which handles phis of these typ. +(Last ___) => v.Args[len(v.Args)-1] + (Int64Hi (Int64Make hi _)) => hi (Int64Lo (Int64Make _ lo)) => lo +(Select0 (MakeTuple x y)) => x +(Select1 (MakeTuple x y)) => y (Load ptr mem) && is64BitInt(t) && !config.BigEndian && t.IsSigned() => (Int64Make @@ -60,30 +64,85 @@ (Arg {n} [off]) (Arg {n} [off+4])) -(Add64 x y) => - (Int64Make - (Add32withcarry - (Int64Hi x) - (Int64Hi y) - (Select1 (Add32carry (Int64Lo x) (Int64Lo y)))) - (Select0 (Add32carry (Int64Lo x) (Int64Lo y)))) +(Add64 x y) => + (Last + x0: (Int64Lo x) + x1: (Int64Hi x) + y0: (Int64Lo y) + y1: (Int64Hi y) + add: (Add32carry x0 y0) + (Int64Make + (Add32withcarry x1 y1 (Select1 add)) + (Select0 add))) -(Sub64 x y) => - (Int64Make - (Sub32withcarry - (Int64Hi x) - (Int64Hi y) - (Select1 (Sub32carry (Int64Lo x) (Int64Lo y)))) - (Select0 (Sub32carry (Int64Lo x) (Int64Lo y)))) +(Sub64 x y) => + (Last + x0: (Int64Lo x) + x1: (Int64Hi x) + y0: (Int64Lo y) + y1: (Int64Hi y) + sub: (Sub32carry x0 y0) + (Int64Make + (Sub32withcarry x1 y1 (Select1 sub)) + (Select0 sub))) + +(Mul64 x y) => + (Last + x0: (Int64Lo x) + x1: (Int64Hi x) + y0: (Int64Lo y) + y1: (Int64Hi y) + x0y0: (Mul32uhilo x0 y0) + x0y0Hi: (Select0 x0y0) + x0y0Lo: (Select1 x0y0) + (Int64Make + (Add32 x0y0Hi + (Add32 + (Mul32 x0 y1) + (Mul32 x1 y0))) + x0y0Lo)) + +(Mul64uhilo x y) => + (Last + x0: (Int64Lo x) + x1: (Int64Hi x) + y0: (Int64Lo y) + y1: (Int64Hi y) + x0y0: (Mul32uhilo x0 y0) + x0y1: (Mul32uhilo x0 y1) + x1y0: (Mul32uhilo x1 y0) + x1y1: (Mul32uhilo x1 y1) + x0y0Hi: (Select0 x0y0) + x0y0Lo: (Select1 x0y0) + x0y1Hi: (Select0 x0y1) + x0y1Lo: (Select1 x0y1) + x1y0Hi: (Select0 x1y0) + x1y0Lo: (Select1 x1y0) + x1y1Hi: (Select0 x1y1) + x1y1Lo: (Select1 x1y1) + w1a: (Add32carry x0y0Hi x0y1Lo) + w2a: (Add32carrywithcarry x0y1Hi x1y0Hi (Select1 w1a)) + w3a: (Add32withcarry x1y1Hi (Const32 [0]) (Select1 w2a)) + w1b: (Add32carry x1y0Lo (Select0 w1a)) + w2b: (Add32carrywithcarry x1y1Lo (Select0 w2a) (Select1 w1b)) 
+    w3b:    (Add32withcarry w3a (Const32 [0]) (Select1 w2b))
+    (MakeTuple
+      (Int64Make w3b (Select0 w2b))
+      (Int64Make (Select0 w1b) x0y0Lo)))
+
+(Hmul64u x y) => (Select0 (Mul64uhilo x y))
+
+// Hacker's Delight p. 175: signed hmul = unsigned hmul - (x<0)&y - (y<0)&x.
+(Hmul64 x y) =>
+  (Last
+    p:     (Hmul64u x y)
+    xSign: (Int64Make xs:(Rsh32x32 (Int64Hi x) (Const32 [31])) xs)
+    ySign: (Int64Make ys:(Rsh32x32 (Int64Hi y) (Const32 [31])) ys)
+    (Sub64 (Sub64 p (And64 xSign y)) (And64 ySign x)))
+
+// (x+y)/2 => (x-y)/2 + y
+(Avg64u x y) => (Add64 (Rsh64Ux32 (Sub64 x y) (Const32 [1])) y)

-(Mul64 x y) =>
-  (Int64Make
-    (Add32
-      (Mul32 (Int64Lo x) (Int64Hi y))
-      (Add32
-        (Mul32 (Int64Hi x) (Int64Lo y))
-        (Select0 (Mul32uhilo (Int64Lo x) (Int64Lo y)))))
-    (Select1 (Mul32uhilo (Int64Lo x) (Int64Lo y))))

 (And64 x y) =>
 (Int64Make
@@ -217,11 +276,32 @@
 (Rsh8x64 x y) => (Rsh8x32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y)))
 (Rsh8Ux64 x y) => (Rsh8Ux32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y)))
+
 (RotateLeft64 x (Int64Make hi lo)) => (RotateLeft64 x lo)
 (RotateLeft32 x (Int64Make hi lo)) => (RotateLeft32 x lo)
 (RotateLeft16 x (Int64Make hi lo)) => (RotateLeft16 x lo)
 (RotateLeft8 x (Int64Make hi lo)) => (RotateLeft8 x lo)

+// RotateLeft64 by constant, for use in divmod.
+(RotateLeft64 x (Const(64|32|16|8) [c])) && c&63 == 0 => x
+(RotateLeft64 x (Const(64|32|16|8) [c])) && c&63 == 32 => (Int64Make (Int64Lo x) (Int64Hi x))
+(RotateLeft64 x (Const(64|32|16|8) [c])) && 0 < c&63 && c&63 < 32 =>
+  (Int64Make
+    (Or32
+      (Lsh32x32 (Int64Hi x) (Const32 [int32(c&31)]))
+      (Rsh32Ux32 (Int64Lo x) (Const32 [int32(32-c&31)])))
+    (Or32
+      (Lsh32x32 (Int64Lo x) (Const32 [int32(c&31)]))
+      (Rsh32Ux32 (Int64Hi x) (Const32 [int32(32-c&31)]))))
+(RotateLeft64 x (Const(64|32|16|8) [c])) && 32 < c&63 && c&63 < 64 =>
+  (Int64Make
+    (Or32
+      (Lsh32x32 (Int64Lo x) (Const32 [int32(c&31)]))
+      (Rsh32Ux32 (Int64Hi x) (Const32 [int32(32-c&31)])))
+    (Or32
+      (Lsh32x32 (Int64Hi x) (Const32 [int32(c&31)]))
+      (Rsh32Ux32 (Int64Lo x) (Const32 [int32(32-c&31)]))))
+
 // Clean up constants a little
 (Or32 (Zeromask (Const32 [c])) y) && c == 0 => y
 (Or32 (Zeromask (Const32 [c])) y) && c != 0 => (Const32 [-1])
diff --git a/src/cmd/compile/internal/ssa/_gen/divisible.rules b/src/cmd/compile/internal/ssa/_gen/divisible.rules
new file mode 100644
index 00000000000..8c198838267
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/divisible.rules
@@ -0,0 +1,167 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Divisibility checks (x%c == 0 or x%c != 0) convert to multiply, rotate, compare.
+// The opt pass rewrote x%c to x-(x/c)*c
+// and then also rewrote x-(x/c)*c == 0 to x == (x/c)*c.
+// If x/c is being used for a division already (div.Uses != 1)
+// then we leave the expression alone.
+//
+// See ../magic.go for a detailed description of these algorithms.
+// See test/codegen/divmod.go for tests.
+// See divmod.rules for other division rules that run after these.
+
+// Divisibility by unsigned or signed power of two.
+(Eq(8|16|32|64) x (Mul(8|16|32|64) (Div(8|16|32|64)u x (Const(8|16|32|64) [c])) (Const(8|16|32|64) [c])))
+  && x.Op != OpConst64 && isPowerOfTwo(c) =>
+  (Eq(8|16|32|64) (And(8|16|32|64) x (Const(8|16|32|64) [c-1])) (Const(8|16|32|64) [0]))
+(Eq(8|16|32|64) x (Mul(8|16|32|64) (Div(8|16|32|64) x (Const(8|16|32|64) [c])) (Const(8|16|32|64) [c])))
+  && x.Op != OpConst64 && isPowerOfTwo(c) =>
+  (Eq(8|16|32|64) (And(8|16|32|64) x (Const(8|16|32|64) [c-1])) (Const(8|16|32|64) [0]))
+(Neq(8|16|32|64) x (Mul(8|16|32|64) (Div(8|16|32|64)u x (Const(8|16|32|64) [c])) (Const(8|16|32|64) [c])))
+  && x.Op != OpConst64 && isPowerOfTwo(c) =>
+  (Neq(8|16|32|64) (And(8|16|32|64) x (Const(8|16|32|64) [c-1])) (Const(8|16|32|64) [0]))
+(Neq(8|16|32|64) x (Mul(8|16|32|64) (Div(8|16|32|64) x (Const(8|16|32|64) [c])) (Const(8|16|32|64) [c])))
+  && x.Op != OpConst64 && isPowerOfTwo(c) =>
+  (Neq(8|16|32|64) (And(8|16|32|64) x (Const(8|16|32|64) [c-1])) (Const(8|16|32|64) [0]))
+
+// Divisibility by unsigned.
+(Eq8 x (Mul8 div:(Div8u x (Const8 [c])) (Const8 [c])))
+  && div.Uses == 1
+  && x.Op != OpConst8 && udivisibleOK8(c) =>
+  (Leq8U
+    (RotateLeft8
+      (Mul8 x (Const8 [int8(udivisible8(c).m)]))
+      (Const8 [int8(8 - udivisible8(c).k)]))
+    (Const8 [int8(udivisible8(c).max)]))
+(Neq8 x (Mul8 div:(Div8u x (Const8 [c])) (Const8 [c])))
+  && div.Uses == 1
+  && x.Op != OpConst8 && udivisibleOK8(c) =>
+  (Less8U
+    (Const8 [int8(udivisible8(c).max)])
+    (RotateLeft8
+      (Mul8 x (Const8 [int8(udivisible8(c).m)]))
+      (Const8 [int8(8 - udivisible8(c).k)])))
+(Eq16 x (Mul16 div:(Div16u x (Const16 [c])) (Const16 [c])))
+  && div.Uses == 1
+  && x.Op != OpConst16 && udivisibleOK16(c) =>
+  (Leq16U
+    (RotateLeft16
+      (Mul16 x (Const16 [int16(udivisible16(c).m)]))
+      (Const16 [int16(16 - udivisible16(c).k)]))
+    (Const16 [int16(udivisible16(c).max)]))
+(Neq16 x (Mul16 div:(Div16u x (Const16 [c])) (Const16 [c])))
+  && div.Uses == 1
+  && x.Op != OpConst16 && udivisibleOK16(c) =>
+  (Less16U
+    (Const16 [int16(udivisible16(c).max)])
+    (RotateLeft16
+      (Mul16 x (Const16 [int16(udivisible16(c).m)]))
+      (Const16 [int16(16 - udivisible16(c).k)])))
+(Eq32 x (Mul32 div:(Div32u x (Const32 [c])) (Const32 [c])))
+  && div.Uses == 1
+  && x.Op != OpConst32 && udivisibleOK32(c) =>
+  (Leq32U
+    (RotateLeft32
+      (Mul32 x (Const32 [int32(udivisible32(c).m)]))
+      (Const32 [int32(32 - udivisible32(c).k)]))
+    (Const32 [int32(udivisible32(c).max)]))
+(Neq32 x (Mul32 div:(Div32u x (Const32 [c])) (Const32 [c])))
+  && div.Uses == 1
+  && x.Op != OpConst32 && udivisibleOK32(c) =>
+  (Less32U
+    (Const32 [int32(udivisible32(c).max)])
+    (RotateLeft32
+      (Mul32 x (Const32 [int32(udivisible32(c).m)]))
+      (Const32 [int32(32 - udivisible32(c).k)])))
+(Eq64 x (Mul64 div:(Div64u x (Const64 [c])) (Const64 [c])))
+  && div.Uses == 1
+  && x.Op != OpConst64 && udivisibleOK64(c) =>
+  (Leq64U
+    (RotateLeft64
+      (Mul64 x (Const64 [int64(udivisible64(c).m)]))
+      (Const64 [int64(64 - udivisible64(c).k)]))
+    (Const64 [int64(udivisible64(c).max)]))
+(Neq64 x (Mul64 div:(Div64u x (Const64 [c])) (Const64 [c])))
+  && div.Uses == 1
+  && x.Op != OpConst64 && udivisibleOK64(c) =>
+  (Less64U
+    (Const64 [int64(udivisible64(c).max)])
+    (RotateLeft64
+      (Mul64 x (Const64 [int64(udivisible64(c).m)]))
+      (Const64 [int64(64 - udivisible64(c).k)])))
+
+// Divisibility by signed.
+(Eq8 x (Mul8 div:(Div8 x (Const8 [c])) (Const8 [c]))) + && div.Uses == 1 + && x.Op != OpConst8 && sdivisibleOK8(c) => + (Leq8U + (RotateLeft8 + (Add8 (Mul8 x (Const8 [int8(sdivisible8(c).m)])) + (Const8 [int8(sdivisible8(c).a)])) + (Const8 [int8(8 - sdivisible8(c).k)])) + (Const8 [int8(sdivisible8(c).max)])) +(Neq8 x (Mul8 div:(Div8 x (Const8 [c])) (Const8 [c]))) + && div.Uses == 1 + && x.Op != OpConst8 && sdivisibleOK8(c) => + (Less8U + (Const8 [int8(sdivisible8(c).max)]) + (RotateLeft8 + (Add8 (Mul8 x (Const8 [int8(sdivisible8(c).m)])) + (Const8 [int8(sdivisible8(c).a)])) + (Const8 [int8(8 - sdivisible8(c).k)]))) +(Eq16 x (Mul16 div:(Div16 x (Const16 [c])) (Const16 [c]))) + && div.Uses == 1 + && x.Op != OpConst16 && sdivisibleOK16(c) => + (Leq16U + (RotateLeft16 + (Add16 (Mul16 x (Const16 [int16(sdivisible16(c).m)])) + (Const16 [int16(sdivisible16(c).a)])) + (Const16 [int16(16 - sdivisible16(c).k)])) + (Const16 [int16(sdivisible16(c).max)])) +(Neq16 x (Mul16 div:(Div16 x (Const16 [c])) (Const16 [c]))) + && div.Uses == 1 + && x.Op != OpConst16 && sdivisibleOK16(c) => + (Less16U + (Const16 [int16(sdivisible16(c).max)]) + (RotateLeft16 + (Add16 (Mul16 x (Const16 [int16(sdivisible16(c).m)])) + (Const16 [int16(sdivisible16(c).a)])) + (Const16 [int16(16 - sdivisible16(c).k)]))) +(Eq32 x (Mul32 div:(Div32 x (Const32 [c])) (Const32 [c]))) + && div.Uses == 1 + && x.Op != OpConst32 && sdivisibleOK32(c) => + (Leq32U + (RotateLeft32 + (Add32 (Mul32 x (Const32 [int32(sdivisible32(c).m)])) + (Const32 [int32(sdivisible32(c).a)])) + (Const32 [int32(32 - sdivisible32(c).k)])) + (Const32 [int32(sdivisible32(c).max)])) +(Neq32 x (Mul32 div:(Div32 x (Const32 [c])) (Const32 [c]))) + && div.Uses == 1 + && x.Op != OpConst32 && sdivisibleOK32(c) => + (Less32U + (Const32 [int32(sdivisible32(c).max)]) + (RotateLeft32 + (Add32 (Mul32 x (Const32 [int32(sdivisible32(c).m)])) + (Const32 [int32(sdivisible32(c).a)])) + (Const32 [int32(32 - sdivisible32(c).k)]))) +(Eq64 x (Mul64 div:(Div64 x (Const64 [c])) (Const64 [c]))) + && div.Uses == 1 + && x.Op != OpConst64 && sdivisibleOK64(c) => + (Leq64U + (RotateLeft64 + (Add64 (Mul64 x (Const64 [int64(sdivisible64(c).m)])) + (Const64 [int64(sdivisible64(c).a)])) + (Const64 [int64(64 - sdivisible64(c).k)])) + (Const64 [int64(sdivisible64(c).max)])) +(Neq64 x (Mul64 div:(Div64 x (Const64 [c])) (Const64 [c]))) + && div.Uses == 1 + && x.Op != OpConst64 && sdivisibleOK64(c) => + (Less64U + (Const64 [int64(sdivisible64(c).max)]) + (RotateLeft64 + (Add64 (Mul64 x (Const64 [int64(sdivisible64(c).m)])) + (Const64 [int64(sdivisible64(c).a)])) + (Const64 [int64(64 - sdivisible64(c).k)]))) diff --git a/src/cmd/compile/internal/ssa/_gen/divisibleOps.go b/src/cmd/compile/internal/ssa/_gen/divisibleOps.go new file mode 100644 index 00000000000..9fcd03aadb8 --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/divisibleOps.go @@ -0,0 +1,18 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +var divisibleOps = []opData{} + +var divisibleBlocks = []blockData{} + +func init() { + archs = append(archs, arch{ + name: "divisible", + ops: divisibleOps, + blocks: divisibleBlocks, + generic: true, + }) +} diff --git a/src/cmd/compile/internal/ssa/_gen/divmod.rules b/src/cmd/compile/internal/ssa/_gen/divmod.rules new file mode 100644 index 00000000000..7dd7d245bd0 --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/divmod.rules @@ -0,0 +1,235 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Lowering of mul, div, and mod operations. +// Runs after prove, so that prove can analyze div and mod ops +// directly instead of these obscured expansions, +// but before decompose builtin, so that 32-bit systems +// can still lower 64-bit ops to 32-bit ones. +// +// See ../magic.go for a detailed description of these algorithms. +// See test/codegen/divmod.go for tests. + +// Unsigned div and mod by power of 2 handled in generic.rules. +// (The equivalent unsigned right shift and mask are simple enough for prove to analyze.) + +// Signed divide by power of 2. +// n / c = n >> log(c) if n >= 0 +// = (n+c-1) >> log(c) if n < 0 +// We conditionally add c-1 by adding n>>63>>(64-log(c)) (first shift signed, second shift unsigned). +(Div8 n (Const8 [c])) && isPowerOfTwo(c) => + (Rsh8x64 + (Add8 n (Rsh8Ux64 (Rsh8x64 n (Const64 [ 7])) (Const64 [int64( 8-log8(c))]))) + (Const64 [int64(log8(c))])) +(Div16 n (Const16 [c])) && isPowerOfTwo(c) => + (Rsh16x64 + (Add16 n (Rsh16Ux64 (Rsh16x64 n (Const64 [15])) (Const64 [int64(16-log16(c))]))) + (Const64 [int64(log16(c))])) +(Div32 n (Const32 [c])) && isPowerOfTwo(c) => + (Rsh32x64 + (Add32 n (Rsh32Ux64 (Rsh32x64 n (Const64 [31])) (Const64 [int64(32-log32(c))]))) + (Const64 [int64(log32(c))])) +(Div64 n (Const64 [c])) && isPowerOfTwo(c) => + (Rsh64x64 + (Add64 n (Rsh64Ux64 (Rsh64x64 n (Const64 [63])) (Const64 [int64(64-log64(c))]))) + (Const64 [int64(log64(c))])) + +// Divide, not a power of 2, by strength reduction to double-width multiply and shift. +// +// umagicN(c) computes m, s such that N-bit unsigned divide +// x/c = (x*((1<<N)+m))>>N>>s = ((x*m)>>N+x)>>s +// where the multiplies are unsigned. +// Note that the returned m is always N+1 bits; umagicN omits the high 1<<N bit. +// +// smagicN(c) computes m, s such that N-bit signed divide +// x/c = (x*m)>>N>>s - bool2int(x < 0). +// Here m is an unsigned N-bit number but x is signed. +// +// In general the division cases are: +// +// 1. A signed divide where 2N ≤ the register size. +// This form can use the signed algorithm directly. +// +// 2. A signed divide where m is even. +// This form can use a signed double-width multiply with m/2, +// shifting by s-1. +// +// 3. A signed divide where m is odd. +// This form can use x*m = ((x*(m-2^N))>>N+x) with a signed multiply. +// Since intN(m) is m-2^N < 0, the product and x have different signs, +// so there can be no overflow on the addition. +// +// 4. An unsigned divide where we know x < 1<<(N-1). +// This form can use the signed algorithm without the bool2int fixup, +// and since we know the product is only 2N-1 bits, we can use an +// unsigned multiply to obtain the high N bits directly, regardless +// of whether m is odd or even. +// +// 5. An unsigned divide where 2N+1 ≤ the register size. +// This form uses the unsigned algorithm with an explicit (1<<N)+m multiply. +// +// 6. An unsigned divide where m is even. +// Like case 2, this form can use a multiply by (1<<(N-1))+m/2, which +// fits in N bits, shifting by s-1. +// +// 7. An unsigned divide where c is even. +// This form can shift x right by one first, after which the multiplier +// (1<<(N-1))+(m+1)/2 fits in N bits, shifting by s-2. +// +// 8. An unsigned divide using avg. +// x/c = (x*((1<<N)+m))>>N>>s = ((x*m)>>N+x)>>s. +// Let hi = (x*m)>>N, so we want (hi+x) >> s = avg(hi, x) >> (s-1). + +// Case 1. Signed divides where 2N ≤ register size.
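+//
+// (Editor's worked instance of case 1, with hand-derived constants rather
+// than the exact values magic.go computes: for N=8, c=3, m=86 (~2^8/3) and
+// s=0 work. Then x/3 = (sext32(x)*86)>>8 - sign(x): for x=127,
+// 127*86 = 10922 and 10922>>8 = 42 = 127/3; for x=-128, -11008>>8 = -43,
+// and subtracting the sign word -1 gives -42, which is -128/3 truncated
+// toward zero.)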
+(Div8 x (Const8 [c])) && smagicOK8(c) => + (Sub8 + (Rsh32x64 + (Mul32 (SignExt8to32 x) (Const32 [int32(smagic8(c).m)])) + (Const64 [8 + smagic8(c).s])) + (Rsh32x64 (SignExt8to32 x) (Const64 [31]))) +(Div16 x (Const16 [c])) && smagicOK16(c) => + (Sub16 + (Rsh32x64 + (Mul32 (SignExt16to32 x) (Const32 [int32(smagic16(c).m)])) + (Const64 [16 + smagic16(c).s])) + (Rsh32x64 (SignExt16to32 x) (Const64 [31]))) +(Div32 x (Const32 [c])) && smagicOK32(c) && config.RegSize == 8 => + (Sub32 + (Rsh64x64 + (Mul64 (SignExt32to64 x) (Const64 [int64(smagic32(c).m)])) + (Const64 [32 + smagic32(c).s])) + (Rsh64x64 (SignExt32to64 x) (Const64 [63]))) + +// Case 2. Signed divides where m is even. +(Div32 x (Const32 [c])) && smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 => + (Sub32 + (Rsh32x64 + (Hmul32 x (Const32 [int32(smagic32(c).m/2)])) + (Const64 [smagic32(c).s - 1])) + (Rsh32x64 x (Const64 [31]))) +(Div64 x (Const64 [c])) && smagicOK64(c) && smagic64(c).m&1 == 0 => + (Sub64 + (Rsh64x64 + (Hmul64 x (Const64 [int64(smagic64(c).m/2)])) + (Const64 [smagic64(c).s - 1])) + (Rsh64x64 x (Const64 [63]))) + +// Case 3. Signed divides where m is odd. +(Div32 x (Const32 [c])) && smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 => + (Sub32 + (Rsh32x64 + (Add32 x (Hmul32 x (Const32 [int32(smagic32(c).m)]))) + (Const64 [smagic32(c).s])) + (Rsh32x64 x (Const64 [31]))) +(Div64 x (Const64 [c])) && smagicOK64(c) && smagic64(c).m&1 != 0 => + (Sub64 + (Rsh64x64 + (Add64 x (Hmul64 x (Const64 [int64(smagic64(c).m)]))) + (Const64 [smagic64(c).s])) + (Rsh64x64 x (Const64 [63]))) + +// Case 4. Unsigned divide where x < 1<<(N-1). +// Skip Div8u since case 5's handling is just as good. +(Div16u x (Const16 [c])) && t.IsSigned() && smagicOK16(c) => + (Rsh32Ux64 + (Mul32 (SignExt16to32 x) (Const32 [int32(smagic16(c).m)])) + (Const64 [16 + smagic16(c).s])) +(Div32u x (Const32 [c])) && t.IsSigned() && smagicOK32(c) && config.RegSize == 8 => + (Rsh64Ux64 + (Mul64 (SignExt32to64 x) (Const64 [int64(smagic32(c).m)])) + (Const64 [32 + smagic32(c).s])) +(Div32u x (Const32 [c])) && t.IsSigned() && smagicOK32(c) && config.RegSize == 4 => + (Rsh32Ux64 + (Hmul32u x (Const32 [int32(smagic32(c).m)])) + (Const64 [smagic32(c).s])) +(Div64u x (Const64 [c])) && t.IsSigned() && smagicOK64(c) => + (Rsh64Ux64 + (Hmul64u x (Const64 [int64(smagic64(c).m)])) + (Const64 [smagic64(c).s])) + +// Case 5. Unsigned divide where 2N+1 ≤ register size. +(Div8u x (Const8 [c])) && umagicOK8(c) => + (Trunc32to8 + (Rsh32Ux64 + (Mul32 (ZeroExt8to32 x) (Const32 [int32(1<<8 + umagic8(c).m)])) + (Const64 [8 + umagic8(c).s]))) +(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 8 => + (Trunc64to16 + (Rsh64Ux64 + (Mul64 (ZeroExt16to64 x) (Const64 [int64(1<<16 + umagic16(c).m)])) + (Const64 [16 + umagic16(c).s]))) + +// Case 6. Unsigned divide where m is even. 
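+//
+// (Editor's note on the algebra used here: the full multiplier is
+// M = (1<<N) + m, which needs N+1 bits. When m is even, M is even too, and
+// (x*M)>>N>>s == (x*(M/2))>>N>>(s-1), where M/2 = (1<<(N-1)) + m/2 now fits
+// in N bits. That is why the rules below multiply by 1<<15 + umagic16(c).m/2
+// (and the wider analogues) and shift by s-1.)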
+(Div16u x (Const16 [c])) && umagicOK16(c) && umagic16(c).m&1 == 0 => + (Trunc32to16 + (Rsh32Ux64 + (Mul32 (ZeroExt16to32 x) (Const32 [int32(1<<15 + umagic16(c).m/2)])) + (Const64 [16 + umagic16(c).s - 1]))) +(Div32u x (Const32 [c])) && umagicOK32(c) && umagic32(c).m&1 == 0 && config.RegSize == 8 => + (Trunc64to32 + (Rsh64Ux64 + (Mul64 (ZeroExt32to64 x) (Const64 [int64(1<<31 + umagic32(c).m/2)])) + (Const64 [32 + umagic32(c).s - 1]))) +(Div32u x (Const32 [c])) && umagicOK32(c) && umagic32(c).m&1 == 0 && config.RegSize == 4 => + (Rsh32Ux64 + (Hmul32u x (Const32 [int32(1<<31 + umagic32(c).m/2)])) + (Const64 [umagic32(c).s - 1])) +(Div64u x (Const64 [c])) && umagicOK64(c) && umagic64(c).m&1 == 0 => + (Rsh64Ux64 + (Hmul64u x (Const64 [int64(1<<63 + umagic64(c).m/2)])) + (Const64 [umagic64(c).s - 1])) + +// Case 7. Unsigned divide where c is even. +(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 && c&1 == 0 => + (Trunc32to16 + (Rsh32Ux64 + (Mul32 + (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1])) + (Const32 [int32(1<<15 + (umagic16(c).m+1)/2)])) + (Const64 [16 + umagic16(c).s - 2]))) +(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 && c&1 == 0 => + (Trunc64to32 + (Rsh64Ux64 + (Mul64 + (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1])) + (Const64 [int64(1<<31 + (umagic32(c).m+1)/2)])) + (Const64 [32 + umagic32(c).s - 2]))) +(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && c&1 == 0 => + (Rsh32Ux64 + (Hmul32u + (Rsh32Ux64 x (Const64 [1])) + (Const32 [int32(1<<31 + (umagic32(c).m+1)/2)])) + (Const64 [umagic32(c).s - 2])) +(Div64u x (Const64 [c])) && umagicOK64(c) && c&1 == 0 => + (Rsh64Ux64 + (Hmul64u + (Rsh64Ux64 x (Const64 [1])) + (Const64 [int64(1<<63 + (umagic64(c).m+1)/2)])) + (Const64 [umagic64(c).s - 2])) + +// Case 8. Unsigned divide using avg. +(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 => + (Trunc32to16 + (Rsh32Ux64 + (Avg32u + (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) + (Mul32 (ZeroExt16to32 x) (Const32 [int32(umagic16(c).m)]))) + (Const64 [16 + umagic16(c).s - 1]))) +(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 => + (Trunc64to32 + (Rsh64Ux64 + (Avg64u + (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) + (Mul64 (ZeroExt32to64 x) (Const64 [int64(umagic32(c).m)]))) + (Const64 [32 + umagic32(c).s - 1]))) +(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 => + (Rsh32Ux64 + (Avg32u x (Hmul32u x (Const32 [int32(umagic32(c).m)]))) + (Const64 [umagic32(c).s - 1])) +(Div64u x (Const64 [c])) && umagicOK64(c) => + (Rsh64Ux64 + (Avg64u x (Hmul64u x (Const64 [int64(umagic64(c).m)]))) + (Const64 [umagic64(c).s - 1])) diff --git a/src/cmd/compile/internal/ssa/_gen/divmodOps.go b/src/cmd/compile/internal/ssa/_gen/divmodOps.go new file mode 100644 index 00000000000..5e85386f78e --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/divmodOps.go @@ -0,0 +1,18 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
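+
+// (Editor's note: like divisibleOps.go above, this file deliberately defines
+// no ops or blocks; registering an empty generic pseudo-arch named "divmod"
+// is what makes rulegen generate a rewrite pass from divmod.rules. The name
+// of the generated file, presumably rewritedivmod.go, is an assumption based
+// on rulegen's usual naming; the registration mechanism is the init below.)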
+ +package main + +var divmodOps = []opData{} + +var divmodBlocks = []blockData{} + +func init() { + archs = append(archs, arch{ + name: "divmod", + ops: divmodOps, + blocks: divmodBlocks, + generic: true, + }) +} diff --git a/src/cmd/compile/internal/ssa/_gen/generic.rules b/src/cmd/compile/internal/ssa/_gen/generic.rules index 78f177468c8..ccdf0bf50d9 100644 --- a/src/cmd/compile/internal/ssa/_gen/generic.rules +++ b/src/cmd/compile/internal/ssa/_gen/generic.rules @@ -50,10 +50,10 @@ (Cvt32to64F (Const32 [c])) => (Const64F [float64(c)]) (Cvt64to32F (Const64 [c])) => (Const32F [float32(c)]) (Cvt64to64F (Const64 [c])) => (Const64F [float64(c)]) -(Cvt32Fto32 (Const32F [c])) => (Const32 [int32(c)]) -(Cvt32Fto64 (Const32F [c])) => (Const64 [int64(c)]) -(Cvt64Fto32 (Const64F [c])) => (Const32 [int32(c)]) -(Cvt64Fto64 (Const64F [c])) => (Const64 [int64(c)]) +(Cvt32Fto32 (Const32F [c])) && c >= -1<<31 && c < 1<<31 => (Const32 [int32(c)]) +(Cvt32Fto64 (Const32F [c])) && c >= -1<<63 && c < 1<<63 => (Const64 [int64(c)]) +(Cvt64Fto32 (Const64F [c])) && c >= -1<<31 && c < 1<<31 => (Const32 [int32(c)]) +(Cvt64Fto64 (Const64F [c])) && c >= -1<<63 && c < 1<<63 => (Const64 [int64(c)]) (Round32F x:(Const32F)) => x (Round64F x:(Const64F)) => x (CvtBoolToUint8 (ConstBool [false])) => (Const8 [0]) @@ -199,16 +199,6 @@ (And(8|16|32|64) (Com(8|16|32|64) x) (Com(8|16|32|64) y)) => (Com(8|16|32|64) (Or(8|16|32|64) x y)) (Or(8|16|32|64) (Com(8|16|32|64) x) (Com(8|16|32|64) y)) => (Com(8|16|32|64) (And(8|16|32|64) x y)) -// Convert multiplication by a power of two to a shift. -(Mul8 n (Const8 [c])) && isPowerOfTwo(c) => (Lsh8x64 n (Const64 [log8(c)])) -(Mul16 n (Const16 [c])) && isPowerOfTwo(c) => (Lsh16x64 n (Const64 [log16(c)])) -(Mul32 n (Const32 [c])) && isPowerOfTwo(c) => (Lsh32x64 n (Const64 [log32(c)])) -(Mul64 n (Const64 [c])) && isPowerOfTwo(c) => (Lsh64x64 n (Const64 [log64(c)])) -(Mul8 n (Const8 [c])) && t.IsSigned() && isPowerOfTwo(-c) => (Neg8 (Lsh8x64 n (Const64 [log8(-c)]))) -(Mul16 n (Const16 [c])) && t.IsSigned() && isPowerOfTwo(-c) => (Neg16 (Lsh16x64 n (Const64 [log16(-c)]))) -(Mul32 n (Const32 [c])) && t.IsSigned() && isPowerOfTwo(-c) => (Neg32 (Lsh32x64 n (Const64 [log32(-c)]))) -(Mul64 n (Const64 [c])) && t.IsSigned() && isPowerOfTwo(-c) => (Neg64 (Lsh64x64 n (Const64 [log64(-c)]))) - (Mod8 (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [c % d]) (Mod16 (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [c % d]) (Mod32 (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [c % d]) @@ -347,6 +337,22 @@ (OrB ((Less|Leq)16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) && uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d) => ((Less|Leq)16U (Const16 [c-d-1]) (Sub16 x (Const16 [d+1]))) (OrB ((Less|Leq)8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) && uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d) => ((Less|Leq)8U (Const8 [c-d-1]) (Sub8 x (Const8 [d+1]))) +// NaN check: ( x != x || x (>|>=|<|<=) c ) -> ( !(c (>=|>|<=|<) x) ) +(OrB (Neq64F x x) ((Less|Leq)64F x y:(Const64F [c]))) => (Not ((Leq|Less)64F y x)) +(OrB (Neq64F x x) ((Less|Leq)64F y:(Const64F [c]) x)) => (Not ((Leq|Less)64F x y)) +(OrB (Neq32F x x) ((Less|Leq)32F x y:(Const32F [c]))) => (Not ((Leq|Less)32F y x)) +(OrB (Neq32F x x) ((Less|Leq)32F y:(Const32F [c]) x)) => (Not ((Leq|Less)32F x y)) + +// NaN check: ( x != x || Abs(x) (>|>=|<|<=) c ) -> ( !(c (>=|>|<=|<) Abs(x)) ) +(OrB (Neq64F x x) ((Less|Leq)64F abs:(Abs x) y:(Const64F [c]))) => (Not ((Leq|Less)64F y abs)) +(OrB (Neq64F x x) ((Less|Leq)64F y:(Const64F [c]) abs:(Abs x))) => (Not
((Leq|Less)64F abs y)) + +// NaN check: ( x != x || -x (>|>=|<|<=) c ) -> ( !(c (>=|>|<=|<) -x) ) +(OrB (Neq64F x x) ((Less|Leq)64F neg:(Neg64F x) y:(Const64F [c]))) => (Not ((Leq|Less)64F y neg)) +(OrB (Neq64F x x) ((Less|Leq)64F y:(Const64F [c]) neg:(Neg64F x))) => (Not ((Leq|Less)64F neg y)) +(OrB (Neq32F x x) ((Less|Leq)32F neg:(Neg32F x) y:(Const32F [c]))) => (Not ((Leq|Less)32F y neg)) +(OrB (Neq32F x x) ((Less|Leq)32F y:(Const32F [c]) neg:(Neg32F x))) => (Not ((Leq|Less)32F neg y)) + // Canonicalize x-const to x+(-const) (Sub64 x (Const64 [c])) && x.Op != OpConst64 => (Add64 (Const64 [-c]) x) (Sub32 x (Const32 [c])) && x.Op != OpConst32 => (Add32 (Const32 [-c]) x) @@ -364,13 +370,15 @@ // Distribute multiplication c * (d+x) -> c*d + c*x. Useful for: // a[i].b = ...; a[i+1].b = ... -(Mul64 (Const64 [c]) (Add64 (Const64 [d]) x)) => +// The !isPowerOfTwo is a kludge to keep a[i+1] indexed by a multiply, +// which turns into an index by a shift, which can use a shifted operand on ARM systems. +(Mul64 (Const64 [c]) (Add64 (Const64 [d]) x)) && !isPowerOfTwo(c) => (Add64 (Const64 [c*d]) (Mul64 (Const64 [c]) x)) -(Mul32 (Const32 [c]) (Add32 (Const32 [d]) x)) => +(Mul32 (Const32 [c]) (Add32 (Const32 [d]) x)) && !isPowerOfTwo(c) => (Add32 (Const32 [c*d]) (Mul32 (Const32 [c]) x)) -(Mul16 (Const16 [c]) (Add16 (Const16 [d]) x)) => +(Mul16 (Const16 [c]) (Add16 (Const16 [d]) x)) && !isPowerOfTwo(c) => (Add16 (Const16 [c*d]) (Mul16 (Const16 [c]) x)) -(Mul8 (Const8 [c]) (Add8 (Const8 [d]) x)) => +(Mul8 (Const8 [c]) (Add8 (Const8 [d]) x)) && !isPowerOfTwo(c) => (Add8 (Const8 [c*d]) (Mul8 (Const8 [c]) x)) // Rewrite x*y ± x*z to x*(y±z) @@ -613,6 +621,10 @@ (Sub(64|32|16|8) (Com(64|32|16|8) x) (Neg(64|32|16|8) x)) => (Const(64|32|16|8) [-1]) (Add(64|32|16|8) (Com(64|32|16|8) x) x) => (Const(64|32|16|8) [-1]) +// Prove does not simplify this because x + y might overflow into carry; +// however, if no one cares about the carry, let it overflow in a normal add. +(Select0 a:(Add64carry x y (Const64 [0]))) && a.Uses == 1 => (Add64 x y) + // Simplification when involving common integer // (t + x) - (t + y) == x - y // (t + x) - (y + t) == x - y @@ -1014,176 +1026,9 @@ // We must ensure that no intermediate computations are invalid pointers. (Convert a:(Add(64|32) (Add(64|32) (Convert ptr mem) off1) off2) mem) => (AddPtr ptr (Add(64|32) off1 off2)) -// strength reduction of divide by a constant. -// See ../magic.go for a detailed description of these algorithms. - -// Unsigned divide by power of 2. Strength reduce to a shift. -(Div8u n (Const8 [c])) && isUnsignedPowerOfTwo(uint8(c)) => (Rsh8Ux64 n (Const64 [log8u(uint8(c))])) -(Div16u n (Const16 [c])) && isUnsignedPowerOfTwo(uint16(c)) => (Rsh16Ux64 n (Const64 [log16u(uint16(c))])) -(Div32u n (Const32 [c])) && isUnsignedPowerOfTwo(uint32(c)) => (Rsh32Ux64 n (Const64 [log32u(uint32(c))])) -(Div64u n (Const64 [c])) && isUnsignedPowerOfTwo(uint64(c)) => (Rsh64Ux64 n (Const64 [log64u(uint64(c))])) - -// Signed non-negative divide by power of 2. -(Div8 n (Const8 [c])) && isNonNegative(n) && isPowerOfTwo(c) => (Rsh8Ux64 n (Const64 [log8(c)])) -(Div16 n (Const16 [c])) && isNonNegative(n) && isPowerOfTwo(c) => (Rsh16Ux64 n (Const64 [log16(c)])) -(Div32 n (Const32 [c])) && isNonNegative(n) && isPowerOfTwo(c) => (Rsh32Ux64 n (Const64 [log32(c)])) -(Div64 n (Const64 [c])) && isNonNegative(n) && isPowerOfTwo(c) => (Rsh64Ux64 n (Const64 [log64(c)])) -(Div64 n (Const64 [-1<<63])) && isNonNegative(n) => (Const64 [0]) - -// Unsigned divide, not a power of 2.
Strength reduce to a multiply. -// For 8-bit divides, we just do a direct 9-bit by 8-bit multiply. -(Div8u x (Const8 [c])) && umagicOK8(c) => - (Trunc32to8 - (Rsh32Ux64 - (Mul32 - (Const32 [int32(1<<8+umagic8(c).m)]) - (ZeroExt8to32 x)) - (Const64 [8+umagic8(c).s]))) - -// For 16-bit divides on 64-bit machines, we do a direct 17-bit by 16-bit multiply. -(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 8 => - (Trunc64to16 - (Rsh64Ux64 - (Mul64 - (Const64 [int64(1<<16+umagic16(c).m)]) - (ZeroExt16to64 x)) - (Const64 [16+umagic16(c).s]))) - -// For 16-bit divides on 32-bit machines -(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 && umagic16(c).m&1 == 0 => - (Trunc32to16 - (Rsh32Ux64 - (Mul32 - (Const32 [int32(1<<15+umagic16(c).m/2)]) - (ZeroExt16to32 x)) - (Const64 [16+umagic16(c).s-1]))) -(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 && c&1 == 0 => - (Trunc32to16 - (Rsh32Ux64 - (Mul32 - (Const32 [int32(1<<15+(umagic16(c).m+1)/2)]) - (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1]))) - (Const64 [16+umagic16(c).s-2]))) -(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 && config.useAvg => - (Trunc32to16 - (Rsh32Ux64 - (Avg32u - (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) - (Mul32 - (Const32 [int32(umagic16(c).m)]) - (ZeroExt16to32 x))) - (Const64 [16+umagic16(c).s-1]))) - -// For 32-bit divides on 32-bit machines -(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && umagic32(c).m&1 == 0 && config.useHmul => - (Rsh32Ux64 - (Hmul32u - (Const32 [int32(1<<31+umagic32(c).m/2)]) - x) - (Const64 [umagic32(c).s-1])) -(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && c&1 == 0 && config.useHmul => - (Rsh32Ux64 - (Hmul32u - (Const32 [int32(1<<31+(umagic32(c).m+1)/2)]) - (Rsh32Ux64 x (Const64 [1]))) - (Const64 [umagic32(c).s-2])) -(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && config.useAvg && config.useHmul => - (Rsh32Ux64 - (Avg32u - x - (Hmul32u - (Const32 [int32(umagic32(c).m)]) - x)) - (Const64 [umagic32(c).s-1])) - -// For 32-bit divides on 64-bit machines -// We'll use a regular (non-hi) multiply for this case. -(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 && umagic32(c).m&1 == 0 => - (Trunc64to32 - (Rsh64Ux64 - (Mul64 - (Const64 [int64(1<<31+umagic32(c).m/2)]) - (ZeroExt32to64 x)) - (Const64 [32+umagic32(c).s-1]))) -(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 && c&1 == 0 => - (Trunc64to32 - (Rsh64Ux64 - (Mul64 - (Const64 [int64(1<<31+(umagic32(c).m+1)/2)]) - (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1]))) - (Const64 [32+umagic32(c).s-2]))) -(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 && config.useAvg => - (Trunc64to32 - (Rsh64Ux64 - (Avg64u - (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) - (Mul64 - (Const64 [int64(umagic32(c).m)]) - (ZeroExt32to64 x))) - (Const64 [32+umagic32(c).s-1]))) - -// For unsigned 64-bit divides on 32-bit machines, -// if the constant fits in 16 bits (so that the last term -// fits in 32 bits), convert to three 32-bit divides by a constant. 
-// -// If 1<<32 = Q * c + R -// and x = hi << 32 + lo -// -// Then x = (hi/c*c + hi%c) << 32 + lo -// = hi/c*c<<32 + hi%c<<32 + lo -// = hi/c*c<<32 + (hi%c)*(Q*c+R) + lo/c*c + lo%c -// = hi/c*c<<32 + (hi%c)*Q*c + lo/c*c + (hi%c*R+lo%c) -// and x / c = (hi/c)<<32 + (hi%c)*Q + lo/c + (hi%c*R+lo%c)/c -(Div64u x (Const64 [c])) && c > 0 && c <= 0xFFFF && umagicOK32(int32(c)) && config.RegSize == 4 && config.useHmul => - (Add64 - (Add64 - (Add64 - (Lsh64x64 - (ZeroExt32to64 - (Div32u - (Trunc64to32 (Rsh64Ux64 x (Const64 [32]))) - (Const32 [int32(c)]))) - (Const64 [32])) - (ZeroExt32to64 (Div32u (Trunc64to32 x) (Const32 [int32(c)])))) - (Mul64 - (ZeroExt32to64 - (Mod32u - (Trunc64to32 (Rsh64Ux64 x (Const64 [32]))) - (Const32 [int32(c)]))) - (Const64 [int64((1<<32)/c)]))) - (ZeroExt32to64 - (Div32u - (Add32 - (Mod32u (Trunc64to32 x) (Const32 [int32(c)])) - (Mul32 - (Mod32u - (Trunc64to32 (Rsh64Ux64 x (Const64 [32]))) - (Const32 [int32(c)])) - (Const32 [int32((1<<32)%c)]))) - (Const32 [int32(c)])))) - -// For 64-bit divides on 64-bit machines -// (64-bit divides on 32-bit machines are lowered to a runtime call by the walk pass.) -(Div64u x (Const64 [c])) && umagicOK64(c) && config.RegSize == 8 && umagic64(c).m&1 == 0 && config.useHmul => - (Rsh64Ux64 - (Hmul64u - (Const64 [int64(1<<63+umagic64(c).m/2)]) - x) - (Const64 [umagic64(c).s-1])) -(Div64u x (Const64 [c])) && umagicOK64(c) && config.RegSize == 8 && c&1 == 0 && config.useHmul => - (Rsh64Ux64 - (Hmul64u - (Const64 [int64(1<<63+(umagic64(c).m+1)/2)]) - (Rsh64Ux64 x (Const64 [1]))) - (Const64 [umagic64(c).s-2])) -(Div64u x (Const64 [c])) && umagicOK64(c) && config.RegSize == 8 && config.useAvg && config.useHmul => - (Rsh64Ux64 - (Avg64u - x - (Hmul64u - (Const64 [int64(umagic64(c).m)]) - x)) - (Const64 [umagic64(c).s-1])) +// Simplification of divisions. +// Only trivial, easily analyzed (by prove) rewrites here. +// Strength reduction of div to mul is delayed to divmod.rules. // Signed divide by a negative constant. Rewrite to divide by a positive constant. (Div8 n (Const8 [c])) && c < 0 && c != -1<<7 => (Neg8 (Div8 n (Const8 [-c]))) @@ -1194,107 +1039,41 @@ // Dividing by the most-negative number. Result is always 0 except // if the input is also the most-negative number. // We can detect that using the sign bit of x & -x. +(Div64 x (Const64 [-1<<63])) && isNonNegative(x) => (Const64 [0]) (Div8 x (Const8 [-1<<7 ])) => (Rsh8Ux64 (And8 x (Neg8 x)) (Const64 [7 ])) (Div16 x (Const16 [-1<<15])) => (Rsh16Ux64 (And16 x (Neg16 x)) (Const64 [15])) (Div32 x (Const32 [-1<<31])) => (Rsh32Ux64 (And32 x (Neg32 x)) (Const64 [31])) (Div64 x (Const64 [-1<<63])) => (Rsh64Ux64 (And64 x (Neg64 x)) (Const64 [63])) -// Signed divide by power of 2. -// n / c = n >> log(c) if n >= 0 -// = (n+c-1) >> log(c) if n < 0 -// We conditionally add c-1 by adding n>>63>>(64-log(c)) (first shift signed, second shift unsigned). 
-(Div8 n (Const8 [c])) && isPowerOfTwo(c) => - (Rsh8x64 - (Add8 n (Rsh8Ux64 (Rsh8x64 n (Const64 [ 7])) (Const64 [int64( 8-log8(c))]))) - (Const64 [int64(log8(c))])) -(Div16 n (Const16 [c])) && isPowerOfTwo(c) => - (Rsh16x64 - (Add16 n (Rsh16Ux64 (Rsh16x64 n (Const64 [15])) (Const64 [int64(16-log16(c))]))) - (Const64 [int64(log16(c))])) -(Div32 n (Const32 [c])) && isPowerOfTwo(c) => - (Rsh32x64 - (Add32 n (Rsh32Ux64 (Rsh32x64 n (Const64 [31])) (Const64 [int64(32-log32(c))]))) - (Const64 [int64(log32(c))])) -(Div64 n (Const64 [c])) && isPowerOfTwo(c) => - (Rsh64x64 - (Add64 n (Rsh64Ux64 (Rsh64x64 n (Const64 [63])) (Const64 [int64(64-log64(c))]))) - (Const64 [int64(log64(c))])) +// Unsigned divide by power of 2. Strength reduce to a shift. +(Div8u n (Const8 [c])) && isUnsignedPowerOfTwo(uint8(c)) => (Rsh8Ux64 n (Const64 [log8u(uint8(c))])) +(Div16u n (Const16 [c])) && isUnsignedPowerOfTwo(uint16(c)) => (Rsh16Ux64 n (Const64 [log16u(uint16(c))])) +(Div32u n (Const32 [c])) && isUnsignedPowerOfTwo(uint32(c)) => (Rsh32Ux64 n (Const64 [log32u(uint32(c))])) +(Div64u n (Const64 [c])) && isUnsignedPowerOfTwo(uint64(c)) => (Rsh64Ux64 n (Const64 [log64u(uint64(c))])) -// Signed divide, not a power of 2. Strength reduce to a multiply. -(Div8 x (Const8 [c])) && smagicOK8(c) => - (Sub8 - (Rsh32x64 - (Mul32 - (Const32 [int32(smagic8(c).m)]) - (SignExt8to32 x)) - (Const64 [8+smagic8(c).s])) - (Rsh32x64 - (SignExt8to32 x) - (Const64 [31]))) -(Div16 x (Const16 [c])) && smagicOK16(c) => - (Sub16 - (Rsh32x64 - (Mul32 - (Const32 [int32(smagic16(c).m)]) - (SignExt16to32 x)) - (Const64 [16+smagic16(c).s])) - (Rsh32x64 - (SignExt16to32 x) - (Const64 [31]))) -(Div32 x (Const32 [c])) && smagicOK32(c) && config.RegSize == 8 => - (Sub32 - (Rsh64x64 - (Mul64 - (Const64 [int64(smagic32(c).m)]) - (SignExt32to64 x)) - (Const64 [32+smagic32(c).s])) - (Rsh64x64 - (SignExt32to64 x) - (Const64 [63]))) -(Div32 x (Const32 [c])) && smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 && config.useHmul => - (Sub32 - (Rsh32x64 - (Hmul32 - (Const32 [int32(smagic32(c).m/2)]) - x) - (Const64 [smagic32(c).s-1])) - (Rsh32x64 - x - (Const64 [31]))) -(Div32 x (Const32 [c])) && smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 && config.useHmul => - (Sub32 - (Rsh32x64 - (Add32 - (Hmul32 - (Const32 [int32(smagic32(c).m)]) - x) - x) - (Const64 [smagic32(c).s])) - (Rsh32x64 - x - (Const64 [31]))) -(Div64 x (Const64 [c])) && smagicOK64(c) && smagic64(c).m&1 == 0 && config.useHmul => - (Sub64 - (Rsh64x64 - (Hmul64 - (Const64 [int64(smagic64(c).m/2)]) - x) - (Const64 [smagic64(c).s-1])) - (Rsh64x64 - x - (Const64 [63]))) -(Div64 x (Const64 [c])) && smagicOK64(c) && smagic64(c).m&1 != 0 && config.useHmul => - (Sub64 - (Rsh64x64 - (Add64 - (Hmul64 - (Const64 [int64(smagic64(c).m)]) - x) - x) - (Const64 [smagic64(c).s])) - (Rsh64x64 - x - (Const64 [63]))) +// Strength reduce multiplication by a power of two to a shift. +// Excluded from early opt so that prove can recognize mod +// by the x - (x/d)*d pattern. +// (Runs during "middle opt" and "late opt".) 
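+//
+// (Editor's illustration: the mod-to-div rules further below rewrite x % 8
+// into x - (x/8)*8. If this pass had already turned the multiply by 8 into
+// x<<3 before prove runs, prove could no longer match the x - (x/d)*d shape
+// and derive facts such as 0 <= x%8 < 8; deferring the shift rewrite keeps
+// the pattern intact while still producing shifts in the final code.)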
+(Mul8 x (Const8 [c])) && isPowerOfTwo(c) && v.Block.Func.pass.name != "opt" => + (Lsh8x64 x (Const64 [log8(c)])) +(Mul16 x (Const16 [c])) && isPowerOfTwo(c) && v.Block.Func.pass.name != "opt" => + (Lsh16x64 x (Const64 [log16(c)])) +(Mul32 x (Const32 [c])) && isPowerOfTwo(c) && v.Block.Func.pass.name != "opt" => + (Lsh32x64 x (Const64 [log32(c)])) +(Mul64 x (Const64 [c])) && isPowerOfTwo(c) && v.Block.Func.pass.name != "opt" => + (Lsh64x64 x (Const64 [log64(c)])) +(Mul8 x (Const8 [c])) && t.IsSigned() && isPowerOfTwo(-c) && v.Block.Func.pass.name != "opt" => + (Neg8 (Lsh8x64 x (Const64 [log8(-c)]))) +(Mul16 x (Const16 [c])) && t.IsSigned() && isPowerOfTwo(-c) && v.Block.Func.pass.name != "opt" => + (Neg16 (Lsh16x64 x (Const64 [log16(-c)]))) +(Mul32 x (Const32 [c])) && t.IsSigned() && isPowerOfTwo(-c) && v.Block.Func.pass.name != "opt" => + (Neg32 (Lsh32x64 x (Const64 [log32(-c)]))) +(Mul64 x (Const64 [c])) && t.IsSigned() && isPowerOfTwo(-c) && v.Block.Func.pass.name != "opt" => + (Neg64 (Lsh64x64 x (Const64 [log64(-c)]))) + +// Strength reduction of mod to div. +// Strength reduction of div to mul is delayed to divmod.rules. // Unsigned mod by power of 2 constant. (Mod8u n (Const8 [c])) && isUnsignedPowerOfTwo(uint8(c)) => (And8 n (Const8 [c-1])) @@ -1303,6 +1082,7 @@ (Mod64u n (Const64 [c])) && isUnsignedPowerOfTwo(uint64(c)) => (And64 n (Const64 [c-1])) // Signed non-negative mod by power of 2 constant. +// TODO: Replace ModN with ModNu in prove. (Mod8 n (Const8 [c])) && isNonNegative(n) && isPowerOfTwo(c) => (And8 n (Const8 [c-1])) (Mod16 n (Const16 [c])) && isNonNegative(n) && isPowerOfTwo(c) => (And16 n (Const16 [c-1])) (Mod32 n (Const32 [c])) && isNonNegative(n) && isPowerOfTwo(c) => (And32 n (Const32 [c-1])) @@ -1326,16 +1106,18 @@ => (Sub32 x (Mul32 (Div32 x (Const32 [c])) (Const32 [c]))) (Mod64 x (Const64 [c])) && x.Op != OpConst64 && (c > 0 || c == -1<<63) => (Sub64 x (Mul64 (Div64 x (Const64 [c])) (Const64 [c]))) -(Mod8u x (Const8 [c])) && x.Op != OpConst8 && c > 0 && umagicOK8( c) +(Mod8u x (Const8 [c])) && x.Op != OpConst8 && c != 0 => (Sub8 x (Mul8 (Div8u x (Const8 [c])) (Const8 [c]))) -(Mod16u x (Const16 [c])) && x.Op != OpConst16 && c > 0 && umagicOK16(c) +(Mod16u x (Const16 [c])) && x.Op != OpConst16 && c != 0 => (Sub16 x (Mul16 (Div16u x (Const16 [c])) (Const16 [c]))) -(Mod32u x (Const32 [c])) && x.Op != OpConst32 && c > 0 && umagicOK32(c) +(Mod32u x (Const32 [c])) && x.Op != OpConst32 && c != 0 => (Sub32 x (Mul32 (Div32u x (Const32 [c])) (Const32 [c]))) -(Mod64u x (Const64 [c])) && x.Op != OpConst64 && c > 0 && umagicOK64(c) +(Mod64u x (Const64 [c])) && x.Op != OpConst64 && c != 0 => (Sub64 x (Mul64 (Div64u x (Const64 [c])) (Const64 [c]))) -// For architectures without rotates on less than 32-bits, promote these checks to 32-bit. +// Set up for mod->mul+rot optimization in divisible.rules. +// For architectures without rotates on less than 32-bits, promote to 32-bit. +// TODO: Also != 0 case?
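+//
+// (Editor's illustration: on a target where hasSmallRotate reports no 8- or
+// 16-bit rotates, a check like uint8(x)%6 == 0 is widened here to
+// uint32(x)%6 == 0, so that the later mul+rotate divisibility rewrite in
+// divisible.rules can use a 32-bit RotateLeft instead of an 8-bit one.)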
(Eq8 (Mod8u x (Const8 [c])) (Const8 [0])) && x.Op != OpConst8 && udivisibleOK8(c) && !hasSmallRotate(config) => (Eq32 (Mod32u (ZeroExt8to32 x) (Const32 [int32(uint8(c))])) (Const32 [0])) (Eq16 (Mod16u x (Const16 [c])) (Const16 [0])) && x.Op != OpConst16 && udivisibleOK16(c) && !hasSmallRotate(config) => @@ -1345,557 +1127,6 @@ (Eq16 (Mod16 x (Const16 [c])) (Const16 [0])) && x.Op != OpConst16 && sdivisibleOK16(c) && !hasSmallRotate(config) => (Eq32 (Mod32 (SignExt16to32 x) (Const32 [int32(c)])) (Const32 [0])) -// Divisibility checks x%c == 0 convert to multiply and rotate. -// Note, x%c == 0 is rewritten as x == c*(x/c) during the opt pass -// where (x/c) is performed using multiplication with magic constants. -// To rewrite x%c == 0 requires pattern matching the rewritten expression -// and checking that the division by the same constant wasn't already calculated. -// This check is made by counting uses of the magic constant multiplication. -// Note that if there were an intermediate opt pass, this rule could be applied -// directly on the Div op and magic division rewrites could be delayed to late opt. - -// Unsigned divisibility checks convert to multiply and rotate. -(Eq8 x (Mul8 (Const8 [c]) - (Trunc32to8 - (Rsh32Ux64 - mul:(Mul32 - (Const32 [m]) - (ZeroExt8to32 x)) - (Const64 [s]))) - ) -) - && v.Block.Func.pass.name != "opt" && mul.Uses == 1 - && m == int32(1<<8+umagic8(c).m) && s == 8+umagic8(c).s - && x.Op != OpConst8 && udivisibleOK8(c) - => (Leq8U - (RotateLeft8 - (Mul8 - (Const8 [int8(udivisible8(c).m)]) - x) - (Const8 [int8(8-udivisible8(c).k)]) - ) - (Const8 [int8(udivisible8(c).max)]) - ) - -(Eq16 x (Mul16 (Const16 [c]) - (Trunc64to16 - (Rsh64Ux64 - mul:(Mul64 - (Const64 [m]) - (ZeroExt16to64 x)) - (Const64 [s]))) - ) -) - && v.Block.Func.pass.name != "opt" && mul.Uses == 1 - && m == int64(1<<16+umagic16(c).m) && s == 16+umagic16(c).s - && x.Op != OpConst16 && udivisibleOK16(c) - => (Leq16U - (RotateLeft16 - (Mul16 - (Const16 [int16(udivisible16(c).m)]) - x) - (Const16 [int16(16-udivisible16(c).k)]) - ) - (Const16 [int16(udivisible16(c).max)]) - ) - -(Eq16 x (Mul16 (Const16 [c]) - (Trunc32to16 - (Rsh32Ux64 - mul:(Mul32 - (Const32 [m]) - (ZeroExt16to32 x)) - (Const64 [s]))) - ) -) - && v.Block.Func.pass.name != "opt" && mul.Uses == 1 - && m == int32(1<<15+umagic16(c).m/2) && s == 16+umagic16(c).s-1 - && x.Op != OpConst16 && udivisibleOK16(c) - => (Leq16U - (RotateLeft16 - (Mul16 - (Const16 [int16(udivisible16(c).m)]) - x) - (Const16 [int16(16-udivisible16(c).k)]) - ) - (Const16 [int16(udivisible16(c).max)]) - ) - -(Eq16 x (Mul16 (Const16 [c]) - (Trunc32to16 - (Rsh32Ux64 - mul:(Mul32 - (Const32 [m]) - (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1]))) - (Const64 [s]))) - ) -) - && v.Block.Func.pass.name != "opt" && mul.Uses == 1 - && m == int32(1<<15+(umagic16(c).m+1)/2) && s == 16+umagic16(c).s-2 - && x.Op != OpConst16 && udivisibleOK16(c) - => (Leq16U - (RotateLeft16 - (Mul16 - (Const16 [int16(udivisible16(c).m)]) - x) - (Const16 [int16(16-udivisible16(c).k)]) - ) - (Const16 [int16(udivisible16(c).max)]) - ) - -(Eq16 x (Mul16 (Const16 [c]) - (Trunc32to16 - (Rsh32Ux64 - (Avg32u - (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) - mul:(Mul32 - (Const32 [m]) - (ZeroExt16to32 x))) - (Const64 [s]))) - ) -) - && v.Block.Func.pass.name != "opt" && mul.Uses == 1 - && m == int32(umagic16(c).m) && s == 16+umagic16(c).s-1 - && x.Op != OpConst16 && udivisibleOK16(c) - => (Leq16U - (RotateLeft16 - (Mul16 - (Const16 [int16(udivisible16(c).m)]) - x) - (Const16 [int16(16-udivisible16(c).k)]) - ) - 
(Const16 [int16(udivisible16(c).max)]) - ) - -(Eq32 x (Mul32 (Const32 [c]) - (Rsh32Ux64 - mul:(Hmul32u - (Const32 [m]) - x) - (Const64 [s])) - ) -) - && v.Block.Func.pass.name != "opt" && mul.Uses == 1 - && m == int32(1<<31+umagic32(c).m/2) && s == umagic32(c).s-1 - && x.Op != OpConst32 && udivisibleOK32(c) - => (Leq32U - (RotateLeft32 - (Mul32 - (Const32 [int32(udivisible32(c).m)]) - x) - (Const32 [int32(32-udivisible32(c).k)]) - ) - (Const32 [int32(udivisible32(c).max)]) - ) - -(Eq32 x (Mul32 (Const32 [c]) - (Rsh32Ux64 - mul:(Hmul32u - (Const32 [m]) - (Rsh32Ux64 x (Const64 [1]))) - (Const64 [s])) - ) -) - && v.Block.Func.pass.name != "opt" && mul.Uses == 1 - && m == int32(1<<31+(umagic32(c).m+1)/2) && s == umagic32(c).s-2 - && x.Op != OpConst32 && udivisibleOK32(c) - => (Leq32U - (RotateLeft32 - (Mul32 - (Const32 [int32(udivisible32(c).m)]) - x) - (Const32 [int32(32-udivisible32(c).k)]) - ) - (Const32 [int32(udivisible32(c).max)]) - ) - -(Eq32 x (Mul32 (Const32 [c]) - (Rsh32Ux64 - (Avg32u - x - mul:(Hmul32u - (Const32 [m]) - x)) - (Const64 [s])) - ) -) - && v.Block.Func.pass.name != "opt" && mul.Uses == 1 - && m == int32(umagic32(c).m) && s == umagic32(c).s-1 - && x.Op != OpConst32 && udivisibleOK32(c) - => (Leq32U - (RotateLeft32 - (Mul32 - (Const32 [int32(udivisible32(c).m)]) - x) - (Const32 [int32(32-udivisible32(c).k)]) - ) - (Const32 [int32(udivisible32(c).max)]) - ) - -(Eq32 x (Mul32 (Const32 [c]) - (Trunc64to32 - (Rsh64Ux64 - mul:(Mul64 - (Const64 [m]) - (ZeroExt32to64 x)) - (Const64 [s]))) - ) -) - && v.Block.Func.pass.name != "opt" && mul.Uses == 1 - && m == int64(1<<31+umagic32(c).m/2) && s == 32+umagic32(c).s-1 - && x.Op != OpConst32 && udivisibleOK32(c) - => (Leq32U - (RotateLeft32 - (Mul32 - (Const32 [int32(udivisible32(c).m)]) - x) - (Const32 [int32(32-udivisible32(c).k)]) - ) - (Const32 [int32(udivisible32(c).max)]) - ) - -(Eq32 x (Mul32 (Const32 [c]) - (Trunc64to32 - (Rsh64Ux64 - mul:(Mul64 - (Const64 [m]) - (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1]))) - (Const64 [s]))) - ) -) - && v.Block.Func.pass.name != "opt" && mul.Uses == 1 - && m == int64(1<<31+(umagic32(c).m+1)/2) && s == 32+umagic32(c).s-2 - && x.Op != OpConst32 && udivisibleOK32(c) - => (Leq32U - (RotateLeft32 - (Mul32 - (Const32 [int32(udivisible32(c).m)]) - x) - (Const32 [int32(32-udivisible32(c).k)]) - ) - (Const32 [int32(udivisible32(c).max)]) - ) - -(Eq32 x (Mul32 (Const32 [c]) - (Trunc64to32 - (Rsh64Ux64 - (Avg64u - (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) - mul:(Mul64 - (Const64 [m]) - (ZeroExt32to64 x))) - (Const64 [s]))) - ) -) - && v.Block.Func.pass.name != "opt" && mul.Uses == 1 - && m == int64(umagic32(c).m) && s == 32+umagic32(c).s-1 - && x.Op != OpConst32 && udivisibleOK32(c) - => (Leq32U - (RotateLeft32 - (Mul32 - (Const32 [int32(udivisible32(c).m)]) - x) - (Const32 [int32(32-udivisible32(c).k)]) - ) - (Const32 [int32(udivisible32(c).max)]) - ) - -(Eq64 x (Mul64 (Const64 [c]) - (Rsh64Ux64 - mul:(Hmul64u - (Const64 [m]) - x) - (Const64 [s])) - ) -) && v.Block.Func.pass.name != "opt" && mul.Uses == 1 - && m == int64(1<<63+umagic64(c).m/2) && s == umagic64(c).s-1 - && x.Op != OpConst64 && udivisibleOK64(c) - => (Leq64U - (RotateLeft64 - (Mul64 - (Const64 [int64(udivisible64(c).m)]) - x) - (Const64 [64-udivisible64(c).k]) - ) - (Const64 [int64(udivisible64(c).max)]) - ) -(Eq64 x (Mul64 (Const64 [c]) - (Rsh64Ux64 - mul:(Hmul64u - (Const64 [m]) - (Rsh64Ux64 x (Const64 [1]))) - (Const64 [s])) - ) -) && v.Block.Func.pass.name != "opt" && mul.Uses == 1 - && m == int64(1<<63+(umagic64(c).m+1)/2) && s == 
umagic64(c).s-2 - && x.Op != OpConst64 && udivisibleOK64(c) - => (Leq64U - (RotateLeft64 - (Mul64 - (Const64 [int64(udivisible64(c).m)]) - x) - (Const64 [64-udivisible64(c).k]) - ) - (Const64 [int64(udivisible64(c).max)]) - ) -(Eq64 x (Mul64 (Const64 [c]) - (Rsh64Ux64 - (Avg64u - x - mul:(Hmul64u - (Const64 [m]) - x)) - (Const64 [s])) - ) -) && v.Block.Func.pass.name != "opt" && mul.Uses == 1 - && m == int64(umagic64(c).m) && s == umagic64(c).s-1 - && x.Op != OpConst64 && udivisibleOK64(c) - => (Leq64U - (RotateLeft64 - (Mul64 - (Const64 [int64(udivisible64(c).m)]) - x) - (Const64 [64-udivisible64(c).k]) - ) - (Const64 [int64(udivisible64(c).max)]) - ) - -// Signed divisibility checks convert to multiply, add and rotate. -(Eq8 x (Mul8 (Const8 [c]) - (Sub8 - (Rsh32x64 - mul:(Mul32 - (Const32 [m]) - (SignExt8to32 x)) - (Const64 [s])) - (Rsh32x64 - (SignExt8to32 x) - (Const64 [31]))) - ) -) - && v.Block.Func.pass.name != "opt" && mul.Uses == 1 - && m == int32(smagic8(c).m) && s == 8+smagic8(c).s - && x.Op != OpConst8 && sdivisibleOK8(c) - => (Leq8U - (RotateLeft8 - (Add8 - (Mul8 - (Const8 [int8(sdivisible8(c).m)]) - x) - (Const8 [int8(sdivisible8(c).a)]) - ) - (Const8 [int8(8-sdivisible8(c).k)]) - ) - (Const8 [int8(sdivisible8(c).max)]) - ) - -(Eq16 x (Mul16 (Const16 [c]) - (Sub16 - (Rsh32x64 - mul:(Mul32 - (Const32 [m]) - (SignExt16to32 x)) - (Const64 [s])) - (Rsh32x64 - (SignExt16to32 x) - (Const64 [31]))) - ) -) - && v.Block.Func.pass.name != "opt" && mul.Uses == 1 - && m == int32(smagic16(c).m) && s == 16+smagic16(c).s - && x.Op != OpConst16 && sdivisibleOK16(c) - => (Leq16U - (RotateLeft16 - (Add16 - (Mul16 - (Const16 [int16(sdivisible16(c).m)]) - x) - (Const16 [int16(sdivisible16(c).a)]) - ) - (Const16 [int16(16-sdivisible16(c).k)]) - ) - (Const16 [int16(sdivisible16(c).max)]) - ) - -(Eq32 x (Mul32 (Const32 [c]) - (Sub32 - (Rsh64x64 - mul:(Mul64 - (Const64 [m]) - (SignExt32to64 x)) - (Const64 [s])) - (Rsh64x64 - (SignExt32to64 x) - (Const64 [63]))) - ) -) - && v.Block.Func.pass.name != "opt" && mul.Uses == 1 - && m == int64(smagic32(c).m) && s == 32+smagic32(c).s - && x.Op != OpConst32 && sdivisibleOK32(c) - => (Leq32U - (RotateLeft32 - (Add32 - (Mul32 - (Const32 [int32(sdivisible32(c).m)]) - x) - (Const32 [int32(sdivisible32(c).a)]) - ) - (Const32 [int32(32-sdivisible32(c).k)]) - ) - (Const32 [int32(sdivisible32(c).max)]) - ) - -(Eq32 x (Mul32 (Const32 [c]) - (Sub32 - (Rsh32x64 - mul:(Hmul32 - (Const32 [m]) - x) - (Const64 [s])) - (Rsh32x64 - x - (Const64 [31]))) - ) -) - && v.Block.Func.pass.name != "opt" && mul.Uses == 1 - && m == int32(smagic32(c).m/2) && s == smagic32(c).s-1 - && x.Op != OpConst32 && sdivisibleOK32(c) - => (Leq32U - (RotateLeft32 - (Add32 - (Mul32 - (Const32 [int32(sdivisible32(c).m)]) - x) - (Const32 [int32(sdivisible32(c).a)]) - ) - (Const32 [int32(32-sdivisible32(c).k)]) - ) - (Const32 [int32(sdivisible32(c).max)]) - ) - -(Eq32 x (Mul32 (Const32 [c]) - (Sub32 - (Rsh32x64 - (Add32 - mul:(Hmul32 - (Const32 [m]) - x) - x) - (Const64 [s])) - (Rsh32x64 - x - (Const64 [31]))) - ) -) - && v.Block.Func.pass.name != "opt" && mul.Uses == 1 - && m == int32(smagic32(c).m) && s == smagic32(c).s - && x.Op != OpConst32 && sdivisibleOK32(c) - => (Leq32U - (RotateLeft32 - (Add32 - (Mul32 - (Const32 [int32(sdivisible32(c).m)]) - x) - (Const32 [int32(sdivisible32(c).a)]) - ) - (Const32 [int32(32-sdivisible32(c).k)]) - ) - (Const32 [int32(sdivisible32(c).max)]) - ) - -(Eq64 x (Mul64 (Const64 [c]) - (Sub64 - (Rsh64x64 - mul:(Hmul64 - (Const64 [m]) - x) - (Const64 [s])) - (Rsh64x64 - 
x - (Const64 [63]))) - ) -) - && v.Block.Func.pass.name != "opt" && mul.Uses == 1 - && m == int64(smagic64(c).m/2) && s == smagic64(c).s-1 - && x.Op != OpConst64 && sdivisibleOK64(c) - => (Leq64U - (RotateLeft64 - (Add64 - (Mul64 - (Const64 [int64(sdivisible64(c).m)]) - x) - (Const64 [int64(sdivisible64(c).a)]) - ) - (Const64 [64-sdivisible64(c).k]) - ) - (Const64 [int64(sdivisible64(c).max)]) - ) - -(Eq64 x (Mul64 (Const64 [c]) - (Sub64 - (Rsh64x64 - (Add64 - mul:(Hmul64 - (Const64 [m]) - x) - x) - (Const64 [s])) - (Rsh64x64 - x - (Const64 [63]))) - ) -) - && v.Block.Func.pass.name != "opt" && mul.Uses == 1 - && m == int64(smagic64(c).m) && s == smagic64(c).s - && x.Op != OpConst64 && sdivisibleOK64(c) - => (Leq64U - (RotateLeft64 - (Add64 - (Mul64 - (Const64 [int64(sdivisible64(c).m)]) - x) - (Const64 [int64(sdivisible64(c).a)]) - ) - (Const64 [64-sdivisible64(c).k]) - ) - (Const64 [int64(sdivisible64(c).max)]) - ) - -// Divisibility check for signed integers for power of two constant are simple mask. -// However, we must match against the rewritten n%c == 0 -> n - c*(n/c) == 0 -> n == c*(n/c) -// where n/c contains fixup code to handle signed n. -((Eq8|Neq8) n (Lsh8x64 - (Rsh8x64 - (Add8 n (Rsh8Ux64 (Rsh8x64 n (Const64 [ 7])) (Const64 [kbar]))) - (Const64 [k])) - (Const64 [k])) -) && k > 0 && k < 7 && kbar == 8 - k - => ((Eq8|Neq8) (And8 n (Const8 [1<<k-1])) (Const8 [0])) - -((Eq16|Neq16) n (Lsh16x64 - (Rsh16x64 - (Add16 n (Rsh16Ux64 (Rsh16x64 n (Const64 [15])) (Const64 [kbar]))) - (Const64 [k])) - (Const64 [k])) -) && k > 0 && k < 15 && kbar == 16 - k - => ((Eq16|Neq16) (And16 n (Const16 [1<<k-1])) (Const16 [0])) - -((Eq32|Neq32) n (Lsh32x64 - (Rsh32x64 - (Add32 n (Rsh32Ux64 (Rsh32x64 n (Const64 [31])) (Const64 [kbar]))) - (Const64 [k])) - (Const64 [k])) -) && k > 0 && k < 31 && kbar == 32 - k - => ((Eq32|Neq32) (And32 n (Const32 [1<<k-1])) (Const32 [0])) - -((Eq64|Neq64) n (Lsh64x64 - (Rsh64x64 - (Add64 n (Rsh64Ux64 (Rsh64x64 n (Const64 [63])) (Const64 [kbar]))) - (Const64 [k])) - (Const64 [k])) -) && k > 0 && k < 63 && kbar == 64 - k - => ((Eq64|Neq64) (And64 n (Const64 [1<<k-1])) (Const64 [0])) - (Eq(8|16|32|64) s:(Sub(8|16|32|64) x y) (Const(8|16|32|64) [0])) && s.Uses == 1 => (Eq(8|16|32|64) x y) (Neq(8|16|32|64) s:(Sub(8|16|32|64) x y) (Const(8|16|32|64) [0])) && s.Uses == 1 => (Neq(8|16|32|64) x y) @@ -1905,6 +1136,20 @@ (Neq(8|16|32|64) (And(8|16|32|64) x (Const(8|16|32|64) [y])) (Const(8|16|32|64) [y])) && oneBit(y) => (Eq(8|16|32|64) (And(8|16|32|64) x (Const(8|16|32|64) [y])) (Const(8|16|32|64) [0])) +// Mark newly generated bounded shifts as bounded, for opt passes after prove.
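+//
+// (Editor's note: the boolean AuxInt on a shift records that the shift
+// amount is already known to be less than the operand width, which lets
+// lowering omit the compare-and-mask fixup Go's shift semantics otherwise
+// require. Prove normally sets this flag, but it has already run by the time
+// later passes create shifts like these, so a constant amount with
+// 0 < c < width is marked bounded directly here.)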
+(Lsh64x(8|16|32|64) [false] x con:(Const(8|16|32|64) [c])) && 0 < c && c < 64 => (Lsh64x(8|16|32|64) [true] x con) +(Rsh64x(8|16|32|64) [false] x con:(Const(8|16|32|64) [c])) && 0 < c && c < 64 => (Rsh64x(8|16|32|64) [true] x con) +(Rsh64Ux(8|16|32|64) [false] x con:(Const(8|16|32|64) [c])) && 0 < c && c < 64 => (Rsh64Ux(8|16|32|64) [true] x con) +(Lsh32x(8|16|32|64) [false] x con:(Const(8|16|32|64) [c])) && 0 < c && c < 32 => (Lsh32x(8|16|32|64) [true] x con) +(Rsh32x(8|16|32|64) [false] x con:(Const(8|16|32|64) [c])) && 0 < c && c < 32 => (Rsh32x(8|16|32|64) [true] x con) +(Rsh32Ux(8|16|32|64) [false] x con:(Const(8|16|32|64) [c])) && 0 < c && c < 32 => (Rsh32Ux(8|16|32|64) [true] x con) +(Lsh16x(8|16|32|64) [false] x con:(Const(8|16|32|64) [c])) && 0 < c && c < 16 => (Lsh16x(8|16|32|64) [true] x con) +(Rsh16x(8|16|32|64) [false] x con:(Const(8|16|32|64) [c])) && 0 < c && c < 16 => (Rsh16x(8|16|32|64) [true] x con) +(Rsh16Ux(8|16|32|64) [false] x con:(Const(8|16|32|64) [c])) && 0 < c && c < 16 => (Rsh16Ux(8|16|32|64) [true] x con) +(Lsh8x(8|16|32|64) [false] x con:(Const(8|16|32|64) [c])) && 0 < c && c < 8 => (Lsh8x(8|16|32|64) [true] x con) +(Rsh8x(8|16|32|64) [false] x con:(Const(8|16|32|64) [c])) && 0 < c && c < 8 => (Rsh8x(8|16|32|64) [true] x con) +(Rsh8Ux(8|16|32|64) [false] x con:(Const(8|16|32|64) [c])) && 0 < c && c < 8 => (Rsh8Ux(8|16|32|64) [true] x con) + // Reassociate expressions involving // constants such that constants come first, // exposing obvious constant-folding opportunities. @@ -2049,28 +1294,32 @@ // for rewriting results of some late-expanded rewrites (below) (SelectN [n] m:(MakeResult ___)) => m.Args[n] +// TODO(matloob): Try out having non-zeroing mallocs for pointerless +// memory, and leaving the zeroing here. Then the compiler can remove +// the zeroing if the user has explicit writes to the whole object. + // for late-expanded calls, recognize newobject and remove zeroing and nilchecks -(Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call)) - && isSameCall(call.Aux, "runtime.newobject") +(Zero (SelectN [0] call:(StaticLECall ___)) mem:(SelectN [1] call)) + && isMalloc(call.Aux) => mem -(Store (SelectN [0] call:(StaticLECall _ _)) x mem:(SelectN [1] call)) +(Store (SelectN [0] call:(StaticLECall ___)) x mem:(SelectN [1] call)) && isConstZero(x) - && isSameCall(call.Aux, "runtime.newobject") + && isMalloc(call.Aux) => mem -(Store (OffPtr (SelectN [0] call:(StaticLECall _ _))) x mem:(SelectN [1] call)) +(Store (OffPtr (SelectN [0] call:(StaticLECall ___))) x mem:(SelectN [1] call)) && isConstZero(x) - && isSameCall(call.Aux, "runtime.newobject") + && isMalloc(call.Aux) => mem -(NilCheck ptr:(SelectN [0] call:(StaticLECall _ _)) _) - && isSameCall(call.Aux, "runtime.newobject") +(NilCheck ptr:(SelectN [0] call:(StaticLECall ___)) _) + && isMalloc(call.Aux) && warnRule(fe.Debug_checknil(), v, "removed nil check") => ptr -(NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall _ _))) _) - && isSameCall(call.Aux, "runtime.newobject") +(NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall ___))) _) + && isMalloc(call.Aux) && warnRule(fe.Debug_checknil(), v, "removed nil check") => ptr @@ -2903,7 +2152,7 @@ // we know the underlying type is pointer-ish. (StaticLECall {f} typ_ x y mem) && isSameCall(f, "runtime.efaceeq") - && isDirectType(typ_) + && isDirectAndComparableType(typ_) && clobber(v) => (MakeResult (EqPtr x y) mem) @@ -2911,7 +2160,7 @@ // we know the underlying type is pointer-ish.
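// (Editor's note on the isDirectAndComparable strengthening above and below:
// a direct interface can still hold a non-comparable type, such as a func
// value, and comparing such interfaces must panic at run time; folding the
// runtime call down to a plain EqPtr would silently lose that panic. The
// motivating case is an inference; the diff itself only shows the stricter
// predicate.)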
(StaticLECall {f} itab x y mem) && isSameCall(f, "runtime.ifaceeq") - && isDirectIface(itab) + && isDirectAndComparableIface(itab) && clobber(v) => (MakeResult (EqPtr x y) mem) @@ -2960,3 +2209,17 @@ // if b { x >>= 1 } => x >>= b (CondSelect (Rsh(64|32|16|8)x64 x (Const64 [1])) x bool) => (Rsh(64|32|16|8)x8 [true] x (CvtBoolToUint8 bool)) (CondSelect (Rsh(64|32|16|8)Ux64 x (Const64 [1])) x bool) => (Rsh(64|32|16|8)Ux8 [true] x (CvtBoolToUint8 bool)) + +// bool(int(x)) => x +(Neq8 (CvtBoolToUint8 x) (Const8 [0])) => x +(Neq8 (CvtBoolToUint8 x) (Const8 [1])) => (Not x) +(Eq8 (CvtBoolToUint8 x) (Const8 [1])) => x +(Eq8 (CvtBoolToUint8 x) (Const8 [0])) => (Not x) +(Neq(64|32|16) (ZeroExt8to(64|32|16) (CvtBoolToUint8 x)) (Const(64|32|16) [0])) => x +(Neq(64|32|16) (ZeroExt8to(64|32|16) (CvtBoolToUint8 x)) (Const(64|32|16) [1])) => (Not x) +(Eq(64|32|16) (ZeroExt8to(64|32|16) (CvtBoolToUint8 x)) (Const(64|32|16) [1])) => x +(Eq(64|32|16) (ZeroExt8to(64|32|16) (CvtBoolToUint8 x)) (Const(64|32|16) [0])) => (Not x) +(Neq(64|32|16) (SignExt8to(64|32|16) (CvtBoolToUint8 x)) (Const(64|32|16) [0])) => x +(Neq(64|32|16) (SignExt8to(64|32|16) (CvtBoolToUint8 x)) (Const(64|32|16) [1])) => (Not x) +(Eq(64|32|16) (SignExt8to(64|32|16) (CvtBoolToUint8 x)) (Const(64|32|16) [1])) => x +(Eq(64|32|16) (SignExt8to(64|32|16) (CvtBoolToUint8 x)) (Const(64|32|16) [0])) => (Not x) \ No newline at end of file diff --git a/src/cmd/compile/internal/ssa/_gen/genericOps.go b/src/cmd/compile/internal/ssa/_gen/genericOps.go index 18bd8d7fe9a..ce01f2c0e3d 100644 --- a/src/cmd/compile/internal/ssa/_gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/genericOps.go @@ -16,6 +16,9 @@ package main // are signed or unsigned. var genericOps = []opData{ + // Pseudo-op. + {name: "Last", argLength: -1}, // return last element of tuple; for "let" bindings + // 2-input arithmetic // Types must be consistent with Go typing. Add, for example, must take two values // of the same type and produces that same type. 
@@ -569,8 +572,9 @@ var genericOps = []opData{ {name: "Int64Hi", argLength: 1, typ: "UInt32"}, // high 32-bit of arg0 {name: "Int64Lo", argLength: 1, typ: "UInt32"}, // low 32-bit of arg0 - {name: "Add32carry", argLength: 2, commutative: true, typ: "(UInt32,Flags)"}, // arg0 + arg1, returns (value, carry) - {name: "Add32withcarry", argLength: 3, commutative: true}, // arg0 + arg1 + arg2, arg2=carry (0 or 1) + {name: "Add32carry", argLength: 2, commutative: true, typ: "(UInt32,Flags)"}, // arg0 + arg1, returns (value, carry) + {name: "Add32withcarry", argLength: 3, commutative: true}, // arg0 + arg1 + arg2, arg2=carry (0 or 1) + {name: "Add32carrywithcarry", argLength: 3, commutative: true, typ: "(UInt32,Flags)"}, // arg0 + arg1 + arg2, arg2=carry, returns (value, carry) {name: "Sub32carry", argLength: 2, typ: "(UInt32,Flags)"}, // arg0 - arg1, returns (value, carry) {name: "Sub32withcarry", argLength: 3}, // arg0 - arg1 - arg2, arg2=carry (0 or 1) diff --git a/src/cmd/compile/internal/ssa/_gen/rulegen.go b/src/cmd/compile/internal/ssa/_gen/rulegen.go index 4299ed17245..3daf3e8605a 100644 --- a/src/cmd/compile/internal/ssa/_gen/rulegen.go +++ b/src/cmd/compile/internal/ssa/_gen/rulegen.go @@ -1284,8 +1284,10 @@ func genResult0(rr *RuleRewrite, arch arch, result string, top, move bool, pos s case 0: case 1: rr.add(stmtf("%s.AddArg(%s)", v, all.String())) - default: + case 2, 3, 4, 5, 6: rr.add(stmtf("%s.AddArg%d(%s)", v, len(args), all.String())) + default: + rr.add(stmtf("%s.AddArgs(%s)", v, all.String())) } if cse != nil { @@ -1326,6 +1328,12 @@ outer: d++ case d > 0 && s[i] == close: d-- + case s[i] == ':': + // ignore spaces after colons + nonsp = true + for i+1 < len(s) && (s[i+1] == ' ' || s[i+1] == '\t') { + i++ + } default: nonsp = true } @@ -1360,7 +1368,7 @@ func extract(val string) (op, typ, auxint, aux string, args []string) { val = val[1 : len(val)-1] // remove () // Split val up into regions. - // Split by spaces/tabs, except those contained in (), {}, [], or <>. + // Split by spaces/tabs, except those contained in (), {}, [], or <>, or those directly after a colon. s := split(val) // Extract restrictions and args. @@ -1484,7 +1492,7 @@ func splitNameExpr(arg string) (name, expr string) { // colon is inside the parens, such as in "(Foo x:(Bar))".
return "", arg } - return arg[:colon], arg[colon+1:] + return arg[:colon], strings.TrimSpace(arg[colon+1:]) } func getBlockInfo(op string, arch arch) (name string, data blockData) { diff --git a/src/cmd/compile/internal/ssa/allocators.go b/src/cmd/compile/internal/ssa/allocators.go index 10b1c582801..a84f409b5ba 100644 --- a/src/cmd/compile/internal/ssa/allocators.go +++ b/src/cmd/compile/internal/ssa/allocators.go @@ -331,3 +331,29 @@ func (c *Cache) freeIDSlice(s []ID) { } c.freeLimitSlice(*(*[]limit)(unsafe.Pointer(&b))) } +func (c *Cache) allocUintSlice(n int) []uint { + var base limit + var derived uint + if unsafe.Sizeof(base)%unsafe.Sizeof(derived) != 0 { + panic("bad") + } + scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived) + b := c.allocLimitSlice(int((uintptr(n) + scale - 1) / scale)) + s := unsafeheader.Slice{ + Data: unsafe.Pointer(&b[0]), + Len: n, + Cap: cap(b) * int(scale), + } + return *(*[]uint)(unsafe.Pointer(&s)) +} +func (c *Cache) freeUintSlice(s []uint) { + var base limit + var derived uint + scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived) + b := unsafeheader.Slice{ + Data: unsafe.Pointer(&s[0]), + Len: int((uintptr(len(s)) + scale - 1) / scale), + Cap: int((uintptr(cap(s)) + scale - 1) / scale), + } + c.freeLimitSlice(*(*[]limit)(unsafe.Pointer(&b))) +} diff --git a/src/cmd/compile/internal/ssa/biasedsparsemap.go b/src/cmd/compile/internal/ssa/biasedsparsemap.go index 25fbaf68625..a8bda831b1f 100644 --- a/src/cmd/compile/internal/ssa/biasedsparsemap.go +++ b/src/cmd/compile/internal/ssa/biasedsparsemap.go @@ -31,7 +31,7 @@ func (s *biasedSparseMap) cap() int { if s == nil || s.s == nil { return 0 } - return s.s.cap() + int(s.first) + return s.s.cap() + s.first } // size returns the number of entries stored in s diff --git a/src/cmd/compile/internal/ssa/block.go b/src/cmd/compile/internal/ssa/block.go index 0ed90b5a7f1..6564b85ec51 100644 --- a/src/cmd/compile/internal/ssa/block.go +++ b/src/cmd/compile/internal/ssa/block.go @@ -427,9 +427,9 @@ func (b *Block) likelyBranch() bool { return true } -func (b *Block) Logf(msg string, args ...interface{}) { b.Func.Logf(msg, args...) } -func (b *Block) Log() bool { return b.Func.Log() } -func (b *Block) Fatalf(msg string, args ...interface{}) { b.Func.Fatalf(msg, args...) } +func (b *Block) Logf(msg string, args ...any) { b.Func.Logf(msg, args...) } +func (b *Block) Log() bool { return b.Func.Log() } +func (b *Block) Fatalf(msg string, args ...any) { b.Func.Fatalf(msg, args...) } type BranchPrediction int8 diff --git a/src/cmd/compile/internal/ssa/branchelim.go b/src/cmd/compile/internal/ssa/branchelim.go index a7d339cad06..8c411b541d4 100644 --- a/src/cmd/compile/internal/ssa/branchelim.go +++ b/src/cmd/compile/internal/ssa/branchelim.go @@ -21,10 +21,7 @@ import "cmd/internal/src" // rewrite Phis in the postdominator as CondSelects. func branchelim(f *Func) { // FIXME: add support for lowering CondSelects on more architectures - switch f.Config.arch { - case "arm64", "ppc64le", "ppc64", "amd64", "wasm", "loong64": - // implemented - default: + if !f.Config.haveCondSelect { return } @@ -73,7 +70,8 @@ func branchelim(f *Func) { } func canCondSelect(v *Value, arch string, loadAddr *sparseSet) bool { - if loadAddr.contains(v.ID) { + if loadAddr != nil && // prove calls this on some multiplies and doesn't take care of loadAddrs + loadAddr.contains(v.ID) { // The result of the soon-to-be conditional move is used to compute a load address. 
// We want to avoid generating a conditional move in this case // because the load address would now be data-dependent on the condition. diff --git a/src/cmd/compile/internal/ssa/cache.go b/src/cmd/compile/internal/ssa/cache.go index 0c16efcd57d..59d768c34f4 100644 --- a/src/cmd/compile/internal/ssa/cache.go +++ b/src/cmd/compile/internal/ssa/cache.go @@ -29,7 +29,7 @@ type Cache struct { ValueToProgAfter []*obj.Prog debugState debugState - Liveness interface{} // *gc.livenessFuncCache + Liveness any // *gc.livenessFuncCache // Free "headers" for use by the allocators in allocators.go. // Used to put slices in sync.Pools without allocation. diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go index 372d238a1ce..f8cbd1c9a4a 100644 --- a/src/cmd/compile/internal/ssa/compile.go +++ b/src/cmd/compile/internal/ssa/compile.go @@ -461,7 +461,7 @@ var passes = [...]pass{ {name: "short circuit", fn: shortcircuit}, {name: "decompose user", fn: decomposeUser, required: true}, {name: "pre-opt deadcode", fn: deadcode}, - {name: "opt", fn: opt, required: true}, // NB: some generic rules know the name of the opt pass. TODO: split required rules and optimizing rules + {name: "opt", fn: opt, required: true}, {name: "zero arg cse", fn: zcse, required: true}, // required to merge OpSB values {name: "opt deadcode", fn: deadcode, required: true}, // remove any blocks orphaned during opt {name: "generic cse", fn: cse}, @@ -469,12 +469,15 @@ {name: "gcse deadcode", fn: deadcode, required: true}, // clean out after cse and phiopt {name: "nilcheckelim", fn: nilcheckelim}, {name: "prove", fn: prove}, + {name: "divisible", fn: divisible, required: true}, + {name: "divmod", fn: divmod, required: true}, + {name: "middle opt", fn: opt, required: true}, {name: "early fuse", fn: fuseEarly}, {name: "expand calls", fn: expandCalls, required: true}, {name: "decompose builtin", fn: postExpandCallsDecompose, required: true}, {name: "softfloat", fn: softfloat, required: true}, {name: "branchelim", fn: branchelim}, - {name: "late opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules + {name: "late opt", fn: opt, required: true}, {name: "dead auto elim", fn: elimDeadAutosGeneric}, {name: "sccp", fn: sccp}, {name: "generic deadcode", fn: deadcode, required: true}, // remove dead stores, which otherwise mess up store chain @@ -531,6 +534,12 @@ var passOrder = [...]constraint{ {"generic cse", "prove"}, // deadcode after prove to eliminate all new dead blocks. {"prove", "generic deadcode"}, + // divisible after prove to let prove analyze div and mod + {"prove", "divisible"}, + // divmod after divisible to avoid rewriting subexpressions of the ones divisible will handle + {"divisible", "divmod"}, + // divmod before decompose builtin to handle 64-bit on 32-bit systems + {"divmod", "decompose builtin"}, // common-subexpression before dead-store elim, so that we recognize // when two address expressions are the same.
{"generic cse", "dse"}, @@ -540,7 +549,7 @@ var passOrder = [...]constraint{ {"nilcheckelim", "generic deadcode"}, // nilcheckelim generates sequences of plain basic blocks {"nilcheckelim", "late fuse"}, - // nilcheckelim relies on opt to rewrite user nil checks + // nilcheckelim relies on the first opt to rewrite user nil checks {"opt", "nilcheckelim"}, // tighten will be most effective when as many values have been removed as possible {"generic deadcode", "tighten"}, @@ -587,7 +596,7 @@ var passOrder = [...]constraint{ {"memcombine", "lower"}, // late opt transform some CondSelects into math. {"branchelim", "late opt"}, - // ranchelim is an arch-independent pass. + // branchelim is an arch-independent pass. {"branchelim", "lower"}, // lower needs cpu feature information (for SIMD) {"cpufeatures", "lower"}, diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go index 90b3cd13c39..3850fbf9616 100644 --- a/src/cmd/compile/internal/ssa/config.go +++ b/src/cmd/compile/internal/ssa/config.go @@ -41,8 +41,6 @@ type Config struct { hasGReg bool // has hardware g register ctxt *obj.Link // Generic arch information optimize bool // Do optimization - useAvg bool // Use optimizations that need Avg* operations - useHmul bool // Use optimizations that need Hmul* operations SoftFloat bool // Race bool // race detector enabled BigEndian bool // @@ -50,6 +48,7 @@ type Config struct { haveBswap64 bool // architecture implements Bswap64 haveBswap32 bool // architecture implements Bswap32 haveBswap16 bool // architecture implements Bswap16 + haveCondSelect bool // architecture implements CondSelect // mulRecipes[x] = function to build v * x from v. mulRecipes map[int64]mulRecipe @@ -135,17 +134,17 @@ func (t *Types) SetTypPtrs() { type Logger interface { // Logf logs a message from the compiler. - Logf(string, ...interface{}) + Logf(string, ...any) // Log reports whether logging is not a no-op // some logging calls account for more than a few heap allocations. Log() bool // Fatalf reports a compiler error and exits. - Fatalf(pos src.XPos, msg string, args ...interface{}) + Fatalf(pos src.XPos, msg string, args ...any) // Warnl writes compiler messages in the form expected by "errorcheck" tests - Warnl(pos src.XPos, fmt_ string, args ...interface{}) + Warnl(pos src.XPos, fmt_ string, args ...any) // Forwards the Debug flags from gc Debug_checknil() bool @@ -175,8 +174,6 @@ type Frontend interface { // NewConfig returns a new configuration object for the given architecture. 
func NewConfig(arch string, types Types, ctxt *obj.Link, optimize, softfloat bool) *Config { c := &Config{arch: arch, Types: types} - c.useAvg = true - c.useHmul = true switch arch { case "amd64": c.PtrSize = 8 @@ -199,6 +196,7 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize, softfloat boo c.haveBswap64 = true c.haveBswap32 = true c.haveBswap16 = true + c.haveCondSelect = true case "386": c.PtrSize = 4 c.RegSize = 4 @@ -244,6 +242,7 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize, softfloat boo c.haveBswap64 = true c.haveBswap32 = true c.haveBswap16 = true + c.haveCondSelect = true case "ppc64": c.BigEndian = true fallthrough @@ -271,6 +270,7 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize, softfloat boo c.haveBswap64 = true c.haveBswap32 = true c.haveBswap16 = true + c.haveCondSelect = true case "mips64": c.BigEndian = true fallthrough @@ -304,6 +304,7 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize, softfloat boo c.LinkReg = linkRegLOONG64 c.hasGReg = true c.unalignedOK = true + c.haveCondSelect = true case "s390x": c.PtrSize = 8 c.RegSize = 8 @@ -362,8 +363,8 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize, softfloat boo c.FPReg = framepointerRegWasm c.LinkReg = linkRegWasm c.hasGReg = true - c.useAvg = false - c.useHmul = false + c.unalignedOK = true + c.haveCondSelect = true default: ctxt.Diag("arch %s not implemented", arch) } diff --git a/src/cmd/compile/internal/ssa/copyelim_test.go b/src/cmd/compile/internal/ssa/copyelim_test.go index fe31b121916..20e548f7fc9 100644 --- a/src/cmd/compile/internal/ssa/copyelim_test.go +++ b/src/cmd/compile/internal/ssa/copyelim_test.go @@ -20,7 +20,7 @@ func BenchmarkCopyElim100000(b *testing.B) { benchmarkCopyElim(b, 100000) } func benchmarkCopyElim(b *testing.B, n int) { c := testConfig(b) - values := make([]interface{}, 0, n+2) + values := make([]any, 0, n+2) values = append(values, Valu("mem", OpInitMem, types.TypeMem, 0, nil)) last := "mem" for i := 0; i < n; i++ { diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go index d0adff788c0..cdf290e2aa8 100644 --- a/src/cmd/compile/internal/ssa/deadstore.go +++ b/src/cmd/compile/internal/ssa/deadstore.go @@ -203,9 +203,27 @@ func (sr shadowRange) merge(lo, hi int64) shadowRange { // reaches stores then we delete all the stores. The other operations will then // be eliminated by the dead code elimination pass. func elimDeadAutosGeneric(f *Func) { - addr := make(map[*Value]*ir.Name) // values that the address of the auto reaches - elim := make(map[*Value]*ir.Name) // values that could be eliminated if the auto is - var used ir.NameSet // used autos that must be kept + addr := make(map[*Value]*ir.Name) // values that the address of the auto reaches + elim := make(map[*Value]*ir.Name) // values that could be eliminated if the auto is + move := make(map[*ir.Name]ir.NameSet) // for a (Move &y &x _) where y is unused, move[y].Add(x) + var used ir.NameSet // used autos that must be kept + + // Adds a name to used and, when it is the target of a move, also + // propagates the used state to its source.
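// Editor's illustration (assumed example, not from the CL) of the pattern the
// new move map is for: copying one otherwise-dead auto into another compiles
// to a (Move &y &x _). Treating x as used just because its address reaches
// the Move would keep both autos alive; recording x under move[y] instead
// lets both be deleted, while usedAdd below re-marks x if y ever turns out
// to be used after all.
type bigT struct{ buf [256]byte }

func deadCopy() {
	var x bigT // dead auto
	y := x     // (Move &y &x _); y is dead as well
	_ = y
}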
+ var usedAdd func(n *ir.Name) bool + usedAdd = func(n *ir.Name) bool { + if used.Has(n) { + return false + } + used.Add(n) + if s := move[n]; s != nil { + delete(move, n) + for n := range s { + usedAdd(n) + } + } + return true + } // visit the value and report whether any of the maps are updated visit := func(v *Value) (changed bool) { @@ -244,10 +262,7 @@ func elimDeadAutosGeneric(f *Func) { if !ok || (n.Class != ir.PAUTO && !isABIInternalParam(f, n)) { return } - if !used.Has(n) { - used.Add(n) - changed = true - } + changed = usedAdd(n) || changed return case OpStore, OpMove, OpZero: // v should be eliminated if we eliminate the auto. @@ -279,10 +294,22 @@ func elimDeadAutosGeneric(f *Func) { if v.Type.IsMemory() || v.Type.IsFlags() || v.Op == OpPhi || v.MemoryArg() != nil { for _, a := range args { if n, ok := addr[a]; ok { - if !used.Has(n) { - used.Add(n) - changed = true + // If the addr of n is used by an OpMove as its source arg, + // and the OpMove's target arg is the addr of an unused name, + // then temporarily treat n as unused, and record it in the move map. + if nam, ok := elim[v]; ok && v.Op == OpMove && !used.Has(nam) { + if used.Has(n) { + continue + } + s := move[nam] + if s == nil { + s = ir.NameSet{} + move[nam] = s + } + s.Add(n) + continue } + changed = usedAdd(n) || changed } } return @@ -291,17 +318,21 @@ func elimDeadAutosGeneric(f *Func) { // Propagate any auto addresses through v. var node *ir.Name for _, a := range args { - if n, ok := addr[a]; ok && !used.Has(n) { + if n, ok := addr[a]; ok { if node == nil { - node = n - } else if node != n { + if !used.Has(n) { + node = n + } + } else { + if node == n { + continue + } // Most of the time we only see one pointer // reaching an op, but some ops can take // multiple pointers (e.g. NeqPtr, Phi etc.). // This is rare, so just propagate the first // value to keep things simple. - used.Add(n) - changed = true + changed = usedAdd(n) || changed } } } @@ -316,8 +347,7 @@ func elimDeadAutosGeneric(f *Func) { } if addr[v] != node { // This doesn't happen in practice, but catch it just in case. - used.Add(node) - changed = true + changed = usedAdd(node) || changed } return } @@ -336,9 +366,8 @@ func elimDeadAutosGeneric(f *Func) { } // keep the auto if its address reaches a control value for _, c := range b.ControlValues() { - if n, ok := addr[c]; ok && !used.Has(n) { - used.Add(n) - changed = true + if n, ok := addr[c]; ok { + changed = usedAdd(n) || changed } } } diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go index c9a3e4291cc..687abc42cc6 100644 --- a/src/cmd/compile/internal/ssa/debug.go +++ b/src/cmd/compile/internal/ssa/debug.go @@ -195,7 +195,7 @@ type RegisterSet uint64 // logf prints debug-specific logging to stdout (always stdout) if the // current function is tagged by GOSSAFUNC (for ssa output directed // either to stdout or html). -func (s *debugState) logf(msg string, args ...interface{}) { +func (s *debugState) logf(msg string, args ...any) { if s.f.PrintOrHtmlSSA { fmt.Printf(msg, args...)
} @@ -1553,11 +1553,11 @@ func (debugInfo *FuncDebug) PutLocationListDwarf4(list []byte, ctxt *obj.Link, l } if ctxt.UseBASEntries { - listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, int64(begin)) - listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, int64(end)) + listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, begin) + listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, end) } else { - listSym.WriteCURelativeAddr(ctxt, listSym.Size, startPC, int64(begin)) - listSym.WriteCURelativeAddr(ctxt, listSym.Size, startPC, int64(end)) + listSym.WriteCURelativeAddr(ctxt, listSym.Size, startPC, begin) + listSym.WriteCURelativeAddr(ctxt, listSym.Size, startPC, end) } i += 2 * ctxt.Arch.PtrSize diff --git a/src/cmd/compile/internal/ssa/decompose.go b/src/cmd/compile/internal/ssa/decompose.go index c3d9997793e..d2f715a4530 100644 --- a/src/cmd/compile/internal/ssa/decompose.go +++ b/src/cmd/compile/internal/ssa/decompose.go @@ -13,14 +13,14 @@ import ( // decompose converts phi ops on compound builtin types into phi // ops on simple types, then invokes rewrite rules to decompose // other ops on those types. -func decomposeBuiltIn(f *Func) { +func decomposeBuiltin(f *Func) { // Decompose phis for _, b := range f.Blocks { for _, v := range b.Values { if v.Op != OpPhi { continue } - decomposeBuiltInPhi(v) + decomposeBuiltinPhi(v) } } @@ -121,7 +121,7 @@ func maybeAppend2(f *Func, ss []*LocalSlot, s1, s2 *LocalSlot) []*LocalSlot { return maybeAppend(f, maybeAppend(f, ss, s1), s2) } -func decomposeBuiltInPhi(v *Value) { +func decomposeBuiltinPhi(v *Value) { switch { case v.Type.IsInteger() && v.Type.Size() > v.Block.Func.Config.RegSize: decomposeInt64Phi(v) diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go index 1f92f7ae807..c1726b27976 100644 --- a/src/cmd/compile/internal/ssa/expand_calls.go +++ b/src/cmd/compile/internal/ssa/expand_calls.go @@ -15,7 +15,7 @@ import ( func postExpandCallsDecompose(f *Func) { decomposeUser(f) // redo user decompose to cleanup after expand calls - decomposeBuiltIn(f) // handles both regular decomposition and cleanup. + decomposeBuiltin(f) // handles both regular decomposition and cleanup. } func expandCalls(f *Func) { @@ -960,7 +960,7 @@ func (x *expandState) indent(n int) { } // Printf does an indented fmt.Printf on the format and args. -func (x *expandState) Printf(format string, a ...interface{}) (n int, err error) { +func (x *expandState) Printf(format string, a ...any) (n int, err error) { if x.indentLevel > 0 { fmt.Printf("%[1]*s", x.indentLevel, "") } diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go index c33c77f891c..3ab0be73113 100644 --- a/src/cmd/compile/internal/ssa/export_test.go +++ b/src/cmd/compile/internal/ssa/export_test.go @@ -98,12 +98,12 @@ func (TestFrontend) UseWriteBarrier() bool { return true // only writebarrier_test cares } -func (d TestFrontend) Logf(msg string, args ...interface{}) { d.t.Logf(msg, args...) } -func (d TestFrontend) Log() bool { return true } +func (d TestFrontend) Logf(msg string, args ...any) { d.t.Logf(msg, args...) } +func (d TestFrontend) Log() bool { return true } -func (d TestFrontend) Fatalf(_ src.XPos, msg string, args ...interface{}) { d.t.Fatalf(msg, args...) } -func (d TestFrontend) Warnl(_ src.XPos, msg string, args ...interface{}) { d.t.Logf(msg, args...) 
} -func (d TestFrontend) Debug_checknil() bool { return false } +func (d TestFrontend) Fatalf(_ src.XPos, msg string, args ...any) { d.t.Fatalf(msg, args...) } +func (d TestFrontend) Warnl(_ src.XPos, msg string, args ...any) { d.t.Logf(msg, args...) } +func (d TestFrontend) Debug_checknil() bool { return false } func (d TestFrontend) Func() *ir.Func { return d.f diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go index 4368252da46..690e2f033d6 100644 --- a/src/cmd/compile/internal/ssa/func.go +++ b/src/cmd/compile/internal/ssa/func.go @@ -338,7 +338,7 @@ func (f *Func) newValueNoBlock(op Op, t *types.Type, pos src.XPos) *Value { // context to allow item-by-item comparisons across runs. // For example: // awk 'BEGIN {FS="\t"} $3~/TIME/{sum+=$4} END{print "t(ns)=",sum}' t.log -func (f *Func) LogStat(key string, args ...interface{}) { +func (f *Func) LogStat(key string, args ...any) { value := "" for _, a := range args { value += fmt.Sprintf("\t%v", a) @@ -744,12 +744,12 @@ func (f *Func) ConstOffPtrSP(t *types.Type, c int64, sp *Value) *Value { return v } -func (f *Func) Frontend() Frontend { return f.fe } -func (f *Func) Warnl(pos src.XPos, msg string, args ...interface{}) { f.fe.Warnl(pos, msg, args...) } -func (f *Func) Logf(msg string, args ...interface{}) { f.fe.Logf(msg, args...) } -func (f *Func) Log() bool { return f.fe.Log() } +func (f *Func) Frontend() Frontend { return f.fe } +func (f *Func) Warnl(pos src.XPos, msg string, args ...any) { f.fe.Warnl(pos, msg, args...) } +func (f *Func) Logf(msg string, args ...any) { f.fe.Logf(msg, args...) } +func (f *Func) Log() bool { return f.fe.Log() } -func (f *Func) Fatalf(msg string, args ...interface{}) { +func (f *Func) Fatalf(msg string, args ...any) { stats := "crashed" if f.Log() { f.Logf(" pass %s end %s\n", f.pass.name, stats) diff --git a/src/cmd/compile/internal/ssa/func_test.go b/src/cmd/compile/internal/ssa/func_test.go index 4639d674e14..1a378d4a95f 100644 --- a/src/cmd/compile/internal/ssa/func_test.go +++ b/src/cmd/compile/internal/ssa/func_test.go @@ -206,7 +206,7 @@ func (c *Conf) Fun(entry string, blocs ...bloc) fun { // Bloc defines a block for Fun. The bloc name should be unique // across the containing Fun. entries should consist of calls to valu, // as well as one call to Goto, If, or Exit to specify the block kind. -func Bloc(name string, entries ...interface{}) bloc { +func Bloc(name string, entries ...any) bloc { b := bloc{} b.name = name seenCtrl := false diff --git a/src/cmd/compile/internal/ssa/fuse.go b/src/cmd/compile/internal/ssa/fuse.go index 68defde7b4b..0cee91b532b 100644 --- a/src/cmd/compile/internal/ssa/fuse.go +++ b/src/cmd/compile/internal/ssa/fuse.go @@ -9,8 +9,8 @@ import ( "fmt" ) -// fuseEarly runs fuse(f, fuseTypePlain|fuseTypeIntInRange). -func fuseEarly(f *Func) { fuse(f, fuseTypePlain|fuseTypeIntInRange) } +// fuseEarly runs fuse(f, fuseTypePlain|fuseTypeIntInRange|fuseTypeNanCheck). +func fuseEarly(f *Func) { fuse(f, fuseTypePlain|fuseTypeIntInRange|fuseTypeNanCheck) } // fuseLate runs fuse(f, fuseTypePlain|fuseTypeIf|fuseTypeBranchRedirect). 
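// Editor's example (mine, not from the CL): the float guard that the new
// fuseTypeNanCheck mode targets. Once the two comparisons are fused into
// (x != x) | (x > limit), rewrite rules can reduce the whole test to
// !(x <= limit), because a comparison with a NaN operand is always false.
func outOfRange(x, limit float64) bool {
	return x != x || x > limit // "x is NaN, or x is above limit"
}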
func fuseLate(f *Func) { fuse(f, fuseTypePlain|fuseTypeIf|fuseTypeBranchRedirect) } @@ -21,6 +21,7 @@ const ( fuseTypePlain fuseType = 1 << iota fuseTypeIf fuseTypeIntInRange + fuseTypeNanCheck fuseTypeBranchRedirect fuseTypeShortCircuit ) @@ -38,7 +39,10 @@ func fuse(f *Func, typ fuseType) { changed = fuseBlockIf(b) || changed } if typ&fuseTypeIntInRange != 0 { - changed = fuseIntegerComparisons(b) || changed + changed = fuseIntInRange(b) || changed + } + if typ&fuseTypeNanCheck != 0 { + changed = fuseNanCheck(b) || changed } if typ&fuseTypePlain != 0 { changed = fuseBlockPlain(b) || changed diff --git a/src/cmd/compile/internal/ssa/fuse_comparisons.go b/src/cmd/compile/internal/ssa/fuse_comparisons.go index f5fb84b0d73..b6eb8fcb90d 100644 --- a/src/cmd/compile/internal/ssa/fuse_comparisons.go +++ b/src/cmd/compile/internal/ssa/fuse_comparisons.go @@ -4,21 +4,36 @@ package ssa -// fuseIntegerComparisons optimizes inequalities such as '1 <= x && x < 5', -// which can be optimized to 'unsigned(x-1) < 4'. +// fuseIntInRange transforms integer range checks to remove the short-circuit operator. For example, +// it would convert `if 1 <= x && x < 5 { ... }` into `if (1 <= x) & (x < 5) { ... }`. Rewrite rules +// can then optimize these into unsigned range checks, `if unsigned(x-1) < 4 { ... }` in this case. +func fuseIntInRange(b *Block) bool { + return fuseComparisons(b, canOptIntInRange) +} + +// fuseNanCheck replaces the short-circuit operators between NaN checks and comparisons with +// constants. For example, it would transform `if x != x || x > 1.0 { ... }` into +// `if (x != x) | (x > 1.0) { ... }`. Rewrite rules can then merge the NaN check with the comparison, +// in this case generating `if !(x <= 1.0) { ... }`. +func fuseNanCheck(b *Block) bool { + return fuseComparisons(b, canOptNanCheck) +} + +// fuseComparisons looks for control graphs that match this pattern: // -// Look for branch structure like: -// -// p +// p - predecessor // |\ -// | b +// | b - block // |/ \ -// s0 s1 +// s0 s1 - successors // -// In our example, p has control '1 <= x', b has control 'x < 5', -// and s0 and s1 are the if and else results of the comparison. +// This pattern is typical for if statements such as `if x || y { ... }` and `if x && y { ... }`. // -// This will be optimized into: +// If canOptControls returns true when passed the control values for p and b then fuseComparisons +// will try to convert p into a plain block with only one successor (b) and modify b's control +// value to include p's control value (effectively causing b to be speculatively executed). +// +// This transformation results in a control graph that will now look like this: // // p // \ @@ -26,9 +41,12 @@ package ssa // / \ // s0 s1 // -// where b has the combined control value 'unsigned(x-1) < 4'. // Later passes will then fuse p and b. -func fuseIntegerComparisons(b *Block) bool { +// +// In other words `if x || y { ... }` will become `if x | y { ... }` and `if x && y { ... }` will +// become `if x & y { ... }`. This is a useful transformation because we can then use rewrite +// rules to optimize `x | y` and `x & y`. +func fuseComparisons(b *Block, canOptControls func(a, b *Value, op Op) bool) bool { if len(b.Preds) != 1 { return false } @@ -45,14 +63,6 @@ func fuseIntegerComparisons(b *Block) bool { return false } - // Check if the control values combine to make an integer inequality that - // can be further optimized later. 
- bc := b.Controls[0] - pc := p.Controls[0] - if !areMergeableInequalities(bc, pc) { - return false - } - // If the first (true) successors match then we have a disjunction (||). // If the second (false) successors match then we have a conjunction (&&). for i, op := range [2]Op{OpOrB, OpAndB} { @@ -60,6 +70,13 @@ continue } + // Check if the control values can be usefully combined. + bc := b.Controls[0] + pc := p.Controls[0] + if !canOptControls(bc, pc, op) { + return false + } + // TODO(mundaym): should we also check the cost of executing b? // Currently we might speculatively execute b even if b contains // a lot of instructions. We could just check that len(b.Values) @@ -125,7 +142,7 @@ func isUnsignedInequality(v *Value) bool { return false } -func areMergeableInequalities(x, y *Value) bool { +func canOptIntInRange(x, y *Value, op Op) bool { // We need both inequalities to be either in the signed or unsigned domain. // TODO(mundaym): it would also be good to merge when we have an Eq op that // could be transformed into a Less/Leq. For example in the unsigned @@ -155,3 +172,60 @@ } return false } + +// canOptNanCheck reports whether one of the arguments is a NaN check and the other +// is a comparison with a constant that can be combined together. +// +// Examples (c must be a constant): +// +// v != v || v < c => !(c <= v) +// v != v || v <= c => !(c < v) +// v != v || c < v => !(v <= c) +// v != v || c <= v => !(v < c) +func canOptNanCheck(x, y *Value, op Op) bool { + if op != OpOrB { + return false + } + + for i := 0; i <= 1; i, x, y = i+1, y, x { + if len(x.Args) != 2 || x.Args[0] != x.Args[1] { + continue + } + v := x.Args[0] + switch x.Op { + case OpNeq64F: + if y.Op != OpLess64F && y.Op != OpLeq64F { + return false + } + for j := 0; j <= 1; j++ { + a, b := y.Args[j], y.Args[j^1] + if a.Op != OpConst64F { + continue + } + // Sign bit operations do not affect NaN check results. This special case allows us + // to optimize statements like `if v != v || Abs(v) > c { ... }`. + if (b.Op == OpAbs || b.Op == OpNeg64F) && b.Args[0] == v { + return true + } + return b == v + } + case OpNeq32F: + if y.Op != OpLess32F && y.Op != OpLeq32F { + return false + } + for j := 0; j <= 1; j++ { + a, b := y.Args[j], y.Args[j^1] + if a.Op != OpConst32F { + continue + } + // Sign bit operations do not affect NaN check results. This special case allows us + // to optimize statements like `if v != v || -v > c { ... }`. + if b.Op == OpNeg32F && b.Args[0] == v { + return true + } + return b == v + } + } + } + return false +} diff --git a/src/cmd/compile/internal/ssa/html.go b/src/cmd/compile/internal/ssa/html.go index 85a414f31e5..7a6683e9f01 100644 --- a/src/cmd/compile/internal/ssa/html.go +++ b/src/cmd/compile/internal/ssa/html.go @@ -53,13 +53,13 @@ func NewHTMLWriter(path string, f *Func, cfgMask string) *HTMLWriter { } // Fatalf reports an error and exits. -func (w *HTMLWriter) Fatalf(msg string, args ...interface{}) { +func (w *HTMLWriter) Fatalf(msg string, args ...any) { fe := w.Func.Frontend() fe.Fatalf(src.NoXPos, msg, args...) } // Logf calls the (w *HTMLWriter).Func's Logf method passing along a msg and args. -func (w *HTMLWriter) Logf(msg string, args ...interface{}) { +func (w *HTMLWriter) Logf(msg string, args ...any) { w.Func.Logf(msg, args...)
} @@ -945,7 +945,7 @@ func (w *HTMLWriter) WriteMultiTitleColumn(phase string, titles []string, class, w.WriteString("\n") } -func (w *HTMLWriter) Printf(msg string, v ...interface{}) { +func (w *HTMLWriter) Printf(msg string, v ...any) { if _, err := fmt.Fprintf(w.w, msg, v...); err != nil { w.Fatalf("%v", err) } diff --git a/src/cmd/compile/internal/ssa/magic_test.go b/src/cmd/compile/internal/ssa/magic_test.go index 7c6009dea6c..44177d679e7 100644 --- a/src/cmd/compile/internal/ssa/magic_test.go +++ b/src/cmd/compile/internal/ssa/magic_test.go @@ -33,7 +33,7 @@ func testMagicExhaustive(t *testing.T, n uint) { min := -int64(1) << (n - 1) max := int64(1) << (n - 1) for c := int64(1); c < max; c++ { - if !smagicOK(n, int64(c)) { + if !smagicOK(n, c) { continue } m := int64(smagic(n, c).m) @@ -164,11 +164,11 @@ func TestMagicSigned(t *testing.T) { if c>>(n-1) != 0 { continue // not appropriate for the given n. } - if !smagicOK(n, int64(c)) { + if !smagicOK(n, c) { t.Errorf("expected n=%d c=%d to pass\n", n, c) } - m := smagic(n, int64(c)).m - s := smagic(n, int64(c)).s + m := smagic(n, c).m + s := smagic(n, c).s C := new(big.Int).SetInt64(c) M := new(big.Int).SetUint64(m) @@ -308,13 +308,13 @@ func testDivisibleExhaustive(t *testing.T, n uint) { minI := -int64(1) << (n - 1) maxI := int64(1) << (n - 1) for c := int64(1); c < maxI; c++ { - if !sdivisibleOK(n, int64(c)) { + if !sdivisibleOK(n, c) { continue } - k := sdivisible(n, int64(c)).k - m := sdivisible(n, int64(c)).m - a := sdivisible(n, int64(c)).a - max := sdivisible(n, int64(c)).max + k := sdivisible(n, c).k + m := sdivisible(n, c).m + a := sdivisible(n, c).a + max := sdivisible(n, c).max mask := ^uint64(0) >> (64 - n) for i := minI; i < maxI; i++ { want := i%c == 0 @@ -369,13 +369,13 @@ func TestDivisibleSigned(t *testing.T) { if c>>(n-1) != 0 { continue // not appropriate for the given n. 
} - if !sdivisibleOK(n, int64(c)) { + if !sdivisibleOK(n, c) { t.Errorf("expected n=%d c=%d to pass\n", n, c) } - k := sdivisible(n, int64(c)).k - m := sdivisible(n, int64(c)).m - a := sdivisible(n, int64(c)).a - max := sdivisible(n, int64(c)).max + k := sdivisible(n, c).k + m := sdivisible(n, c).m + a := sdivisible(n, c).a + max := sdivisible(n, c).max mask := ^uint64(0) >> (64 - n) C := new(big.Int).SetInt64(c) diff --git a/src/cmd/compile/internal/ssa/memcombine.go b/src/cmd/compile/internal/ssa/memcombine.go index b8fcd394959..6b1df7dc099 100644 --- a/src/cmd/compile/internal/ssa/memcombine.go +++ b/src/cmd/compile/internal/ssa/memcombine.go @@ -728,7 +728,7 @@ func combineStores(root *Value) { if isLittleEndian && shift0 != 0 { sv = rightShift(root.Block, root.Pos, sv, shift0) } - shiftedSize = int64(aTotalSize - a[0].size) + shiftedSize = aTotalSize - a[0].size if isBigEndian && shift0-shiftedSize*8 != 0 { sv = rightShift(root.Block, root.Pos, sv, shift0-shiftedSize*8) } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 9d7ee4bea8d..4dd7faeebf3 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -386,6 +386,7 @@ const ( Op386ADDLcarry Op386ADDLconstcarry Op386ADCL + Op386ADCLcarry Op386ADCLconst Op386SUBL Op386SUBLconst @@ -3592,6 +3593,7 @@ const ( OpARMADDSconst OpARMADC OpARMADCconst + OpARMADCS OpARMSUBS OpARMSUBSconst OpARMRSBSconst @@ -4901,6 +4903,8 @@ const ( OpRISCV64REMUW OpRISCV64MOVaddr OpRISCV64MOVDconst + OpRISCV64FMOVDconst + OpRISCV64FMOVFconst OpRISCV64MOVBload OpRISCV64MOVHload OpRISCV64MOVWload @@ -5418,6 +5422,7 @@ const ( OpWasmI64Rotl OpWasmI64Popcnt + OpLast OpAdd8 OpAdd16 OpAdd32 @@ -5752,6 +5757,7 @@ const ( OpInt64Lo OpAdd32carry OpAdd32withcarry + OpAdd32carrywithcarry OpSub32carry OpSub32withcarry OpAdd64carry @@ -7710,6 +7716,24 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "ADCLcarry", + argLen: 3, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: x86.AADCL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + {1, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {1, 0}, + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, { name: "ADCLconst", auxType: auxInt32, @@ -56995,6 +57019,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "ADCS", + argLen: 3, + commutative: true, + asm: arm.AADC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, { name: "SUBS", argLen: 2, @@ -64404,10 +64444,10 @@ var opcodeTable = [...]opInfo{ faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 310378495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R26 R30 - {1, 310378495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R26 R30 + {0, 318767103}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R26 R30 + {1, 318767103}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R26 R30 }, - clobbers: 25165824, // R24 R25 + clobbers: 422212481843200, // R25 F16 F17 }, }, { @@ -64418,10 +64458,10 @@ var opcodeTable = [...]opInfo{ faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 306184191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 R16 R17 R19 R20 R21 R22 R26 R30 - {1, 306184191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R26 R30 + {0, 310378495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R26 R30 + {1, 310378495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R26 R30 }, - clobbers: 29360128, // R23 R24 R25 + clobbers: 422212490231808, // R24 R25 F16 F17 clobbersArg0: true, clobbersArg1: true, }, @@ -67580,9 +67620,6 @@ var opcodeTable = [...]opInfo{ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 - }, }, }, { @@ -67597,9 +67634,6 @@ var opcodeTable = [...]opInfo{ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 - }, }, }, { @@ -74673,6 +74707,30 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "FMOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: riscv.AMOVD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVFconst", + auxType: auxFloat32, + argLen: 0, + rematerializeable: true, + asm: riscv.AMOVF, + reg: regInfo{ + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, { name: "MOVBload", auxType: auxSymOff, @@ -81857,6 +81915,11 @@ var opcodeTable = [...]opInfo{ }, }, + { + name: "Last", + argLen: -1, + generic: true, + }, { name: "Add8", argLen: 2, @@ -83709,6 +83772,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "Add32carrywithcarry", + argLen: 3, + commutative: true, + generic: true, + }, { name: "Sub32carry", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/opt.go b/src/cmd/compile/internal/ssa/opt.go index 0f15c3db4a7..9f155e6179b 100644 --- a/src/cmd/compile/internal/ssa/opt.go +++ b/src/cmd/compile/internal/ssa/opt.go @@ -8,3 +8,11 @@ package ssa func opt(f *Func) { applyRewrite(f, rewriteBlockgeneric, rewriteValuegeneric, removeDeadValues) } + +func divisible(f *Func) { + applyRewrite(f, rewriteBlockdivisible, rewriteValuedivisible, removeDeadValues) +} + +func divmod(f *Func) { + applyRewrite(f, rewriteBlockdivmod, rewriteValuedivmod, removeDeadValues) +} diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go index b4f91fd4fd1..4919d6ad370 100644 --- a/src/cmd/compile/internal/ssa/prove.go +++ b/src/cmd/compile/internal/ssa/prove.go @@ -7,9 +7,12 @@ package ssa import ( "cmd/compile/internal/types" "cmd/internal/src" + "cmp" "fmt" "math" "math/bits" + "slices" + "strings" ) type branch int @@ -130,7 +133,7 @@ type limit struct { } func (l limit) String() string { - return 
fmt.Sprintf("sm,SM,um,UM=%d,%d,%d,%d", l.min, l.max, l.umin, l.umax) + return fmt.Sprintf("sm,SM=%d,%d um,UM=%d,%d", l.min, l.max, l.umin, l.umax) } func (l limit) intersect(l2 limit) limit { @@ -169,6 +172,9 @@ func (l limit) unsignedMinMax(minimum, maximum uint64) limit { func (l limit) nonzero() bool { return l.min > 0 || l.umin > 0 || l.max < 0 } +func (l limit) maybeZero() bool { + return !l.nonzero() +} func (l limit) nonnegative() bool { return l.min >= 0 } @@ -412,6 +418,9 @@ type factsTable struct { // more than one len(s) for a slice. We could keep a list if necessary. lens map[ID]*Value caps map[ID]*Value + + // reusedTopoSortScoresTable recycle allocations for topo-sort + reusedTopoSortScoresTable []uint } // checkpointBound is an invalid value used for checkpointing @@ -1238,6 +1247,176 @@ func (ft *factsTable) cleanup(f *Func) { } f.Cache.freeLimitSlice(ft.limits) f.Cache.freeBoolSlice(ft.recurseCheck) + if cap(ft.reusedTopoSortScoresTable) > 0 { + f.Cache.freeUintSlice(ft.reusedTopoSortScoresTable) + } +} + +// addSlicesOfSameLen finds the slices that are in the same block and whose Op +// is OpPhi and always have the same length, then add the equality relationship +// between them to ft. If two slices start out with the same length and decrease +// in length by the same amount on each round of the loop (or in the if block), +// then we think their lengths are always equal. +// +// See https://go.dev/issues/75144 +// +// In fact, we are just propagating the equality +// +// if len(a) == len(b) { // from here +// for len(a) > 4 { +// a = a[4:] +// b = b[4:] +// } +// if len(a) == len(b) { // to here +// return true +// } +// } +// +// or change the for to if: +// +// if len(a) == len(b) { // from here +// if len(a) > 4 { +// a = a[4:] +// b = b[4:] +// } +// if len(a) == len(b) { // to here +// return true +// } +// } +func addSlicesOfSameLen(ft *factsTable, b *Block) { + // Let w points to the first value we're interested in, and then we + // only process those values ​​that appear to be the same length as w, + // looping only once. This should be enough in most cases. And u is + // similar to w, see comment for predIndex. + var u, w *Value + var i, j, k sliceInfo + isInterested := func(v *Value) bool { + j = getSliceInfo(v) + return j.sliceWhere != sliceUnknown + } + for _, v := range b.Values { + if v.Uses == 0 { + continue + } + if v.Op == OpPhi && len(v.Args) == 2 && ft.lens[v.ID] != nil && isInterested(v) { + if j.predIndex == 1 && ft.lens[v.Args[0].ID] != nil { + // found v = (Phi x (SliceMake _ (Add64 (Const64 [n]) (SliceLen x)) _))) or + // v = (Phi x (SliceMake _ (Add64 (Const64 [n]) (SliceLen v)) _))) + if w == nil { + k = j + w = v + continue + } + // propagate the equality + if j == k && ft.orderS.Equal(ft.lens[v.Args[0].ID], ft.lens[w.Args[0].ID]) { + ft.update(b, ft.lens[v.ID], ft.lens[w.ID], signed, eq) + } + } else if j.predIndex == 0 && ft.lens[v.Args[1].ID] != nil { + // found v = (Phi (SliceMake _ (Add64 (Const64 [n]) (SliceLen x)) _)) x) or + // v = (Phi (SliceMake _ (Add64 (Const64 [n]) (SliceLen v)) _)) x) + if u == nil { + i = j + u = v + continue + } + // propagate the equality + if j == i && ft.orderS.Equal(ft.lens[v.Args[1].ID], ft.lens[u.Args[1].ID]) { + ft.update(b, ft.lens[v.ID], ft.lens[u.ID], signed, eq) + } + } + } + } +} + +type sliceWhere int + +const ( + sliceUnknown sliceWhere = iota + sliceInFor + sliceInIf +) + +// predIndex is used to indicate the branch represented by the predecessor +// block in which the slicing operation occurs. 
+type predIndex int + +type sliceInfo struct { + lengthDiff int64 + sliceWhere + predIndex +} + +// getSliceInfo returns the negative increment of the slice length in a slice +// operation by examining the Phi node at the merge block. So, we are only +// interested in the slice operation if it is inside a for block or an if block. +// Otherwise it returns sliceInfo{0, sliceUnknown, 0}. +// +// For the following for block: +// +// for len(a) > 4 { +// a = a[4:] +// } +// +// vp = (Phi v3 v9) +// v5 = (SliceLen vp) +// v7 = (Add64 (Const64 [-4]) v5) +// v9 = (SliceMake _ v7 _) +// +// returns sliceInfo{-4, sliceInFor, 1} +// +// For a subsequent merge block after an if block: +// +// if len(a) > 4 { +// a = a[4:] +// } +// a // here +// +// vp = (Phi v3 v9) +// v5 = (SliceLen v3) +// v7 = (Add64 (Const64 [-4]) v5) +// v9 = (SliceMake _ v7 _) +// +// returns sliceInfo{-4, sliceInIf, 1} +// +// Returns sliceInfo{0, sliceUnknown, 0} if it is not the slice +// operation we are interested in. +func getSliceInfo(vp *Value) (inf sliceInfo) { + if vp.Op != OpPhi || len(vp.Args) != 2 { + return + } + var i predIndex + var l *Value // length for OpSliceMake + if vp.Args[0].Op != OpSliceMake && vp.Args[1].Op == OpSliceMake { + l = vp.Args[1].Args[1] + i = 1 + } else if vp.Args[0].Op == OpSliceMake && vp.Args[1].Op != OpSliceMake { + l = vp.Args[0].Args[1] + i = 0 + } else { + return + } + var op Op + switch l.Op { + case OpAdd64: + op = OpConst64 + case OpAdd32: + op = OpConst32 + default: + return + } + if l.Args[0].Op == op && l.Args[1].Op == OpSliceLen && l.Args[1].Args[0] == vp { + return sliceInfo{l.Args[0].AuxInt, sliceInFor, i} + } + if l.Args[1].Op == op && l.Args[0].Op == OpSliceLen && l.Args[0].Args[0] == vp { + return sliceInfo{l.Args[1].AuxInt, sliceInFor, i} + } + if l.Args[0].Op == op && l.Args[1].Op == OpSliceLen && l.Args[1].Args[0] == vp.Args[1-i] { + return sliceInfo{l.Args[0].AuxInt, sliceInIf, i} + } + if l.Args[1].Op == op && l.Args[0].Op == OpSliceLen && l.Args[0].Args[0] == vp.Args[1-i] { + return sliceInfo{l.Args[1].AuxInt, sliceInIf, i} + } + return } // prove removes redundant BlockIf branches that can be inferred @@ -1505,6 +1684,9 @@ func prove(f *Func) { addBranchRestrictions(ft, parent, branch) } + // Add equalities between slices of the same length, starting from the current block. + addSlicesOfSameLen(ft, node.block) + if ft.unsat { // node.block is unreachable. // Remove it and don't visit @@ -1765,7 +1947,7 @@ func (ft *factsTable) flowLimit(v *Value) bool { a := ft.limits[v.Args[0].ID] b := ft.limits[v.Args[1].ID] sub := ft.newLimit(v, a.sub(b, uint(v.Type.Size())*8)) - mod := ft.detectSignedMod(v) + mod := ft.detectMod(v) inferred := ft.detectSliceLenRelation(v) return sub || mod || inferred case OpNeg64, OpNeg32, OpNeg16, OpNeg8: @@ -1784,19 +1966,30 @@ b := ft.limits[v.Args[1].ID] bitsize := uint(v.Type.Size()) * 8 return ft.newLimit(v, a.mul(b.exp2(bitsize), bitsize)) - case OpMod64, OpMod32, OpMod16, OpMod8: + case OpRsh64x64, OpRsh64x32, OpRsh64x16, OpRsh64x8, + OpRsh32x64, OpRsh32x32, OpRsh32x16, OpRsh32x8, + OpRsh16x64, OpRsh16x32, OpRsh16x16, OpRsh16x8, + OpRsh8x64, OpRsh8x32, OpRsh8x16, OpRsh8x8: a := ft.limits[v.Args[0].ID] b := ft.limits[v.Args[1].ID] - if !(a.nonnegative() && b.nonnegative()) { - // TODO: we could handle signed limits but I didn't bother.
- break + if b.min >= 0 { + // Shift of negative makes a value closer to 0 (greater), + // so if a.min is negative, v.min is a.min>>b.min instead of a.min>>b.max, + // and similarly if a.max is negative, v.max is a.max>>b.max. + // Easier to compute min and max of both than to write sign logic. + vmin := min(a.min>>b.min, a.min>>b.max) + vmax := max(a.max>>b.min, a.max>>b.max) + return ft.signedMinMax(v, vmin, vmax) } - fallthrough - case OpMod64u, OpMod32u, OpMod16u, OpMod8u: + case OpRsh64Ux64, OpRsh64Ux32, OpRsh64Ux16, OpRsh64Ux8, + OpRsh32Ux64, OpRsh32Ux32, OpRsh32Ux16, OpRsh32Ux8, + OpRsh16Ux64, OpRsh16Ux32, OpRsh16Ux16, OpRsh16Ux8, + OpRsh8Ux64, OpRsh8Ux32, OpRsh8Ux16, OpRsh8Ux8: a := ft.limits[v.Args[0].ID] b := ft.limits[v.Args[1].ID] - // Underflow in the arithmetic below is ok, it gives to MaxUint64 which does nothing to the limit. - return ft.unsignedMax(v, min(a.umax, b.umax-1)) + if b.min >= 0 { + return ft.unsignedMinMax(v, a.umin>>b.max, a.umax>>b.min) + } case OpDiv64, OpDiv32, OpDiv16, OpDiv8: a := ft.limits[v.Args[0].ID] b := ft.limits[v.Args[1].ID] @@ -1816,91 +2009,12 @@ func (ft *factsTable) flowLimit(v *Value) bool { lim = lim.unsignedMax(a.umax / b.umin) } return ft.newLimit(v, lim) + case OpMod64, OpMod32, OpMod16, OpMod8: + return ft.modLimit(true, v, v.Args[0], v.Args[1]) + case OpMod64u, OpMod32u, OpMod16u, OpMod8u: + return ft.modLimit(false, v, v.Args[0], v.Args[1]) case OpPhi: - { - // Work around for go.dev/issue/68857, look for min(x, y) and max(x, y). - b := v.Block - if len(b.Preds) != 2 { - goto notMinNorMax - } - // FIXME: this code searches for the following losange pattern - // because that what ssagen produce for min and max builtins: - // conditionBlock → (firstBlock, secondBlock) → v.Block - // there are three non losange equivalent constructions - // we could match for, but I didn't bother: - // conditionBlock → (v.Block, secondBlock → v.Block) - // conditionBlock → (firstBlock → v.Block, v.Block) - // conditionBlock → (v.Block, v.Block) - firstBlock, secondBlock := b.Preds[0].b, b.Preds[1].b - if firstBlock.Kind != BlockPlain || secondBlock.Kind != BlockPlain { - goto notMinNorMax - } - if len(firstBlock.Preds) != 1 || len(secondBlock.Preds) != 1 { - goto notMinNorMax - } - conditionBlock := firstBlock.Preds[0].b - if conditionBlock != secondBlock.Preds[0].b { - goto notMinNorMax - } - if conditionBlock.Kind != BlockIf { - goto notMinNorMax - } - - less := conditionBlock.Controls[0] - var unsigned bool - switch less.Op { - case OpLess64U, OpLess32U, OpLess16U, OpLess8U, - OpLeq64U, OpLeq32U, OpLeq16U, OpLeq8U: - unsigned = true - case OpLess64, OpLess32, OpLess16, OpLess8, - OpLeq64, OpLeq32, OpLeq16, OpLeq8: - default: - goto notMinNorMax - } - small, big := less.Args[0], less.Args[1] - truev, falsev := v.Args[0], v.Args[1] - if conditionBlock.Succs[0].b == secondBlock { - truev, falsev = falsev, truev - } - - bigl, smalll := ft.limits[big.ID], ft.limits[small.ID] - if truev == big { - if falsev == small { - // v := big if small <¿=? big else small - if unsigned { - maximum := max(bigl.umax, smalll.umax) - minimum := max(bigl.umin, smalll.umin) - return ft.unsignedMinMax(v, minimum, maximum) - } else { - maximum := max(bigl.max, smalll.max) - minimum := max(bigl.min, smalll.min) - return ft.signedMinMax(v, minimum, maximum) - } - } else { - goto notMinNorMax - } - } else if truev == small { - if falsev == big { - // v := small if small <¿=? 
big else big - if unsigned { - maximum := min(bigl.umax, smalll.umax) - minimum := min(bigl.umin, smalll.umin) - return ft.unsignedMinMax(v, minimum, maximum) - } else { - maximum := min(bigl.max, smalll.max) - minimum := min(bigl.min, smalll.min) - return ft.signedMinMax(v, minimum, maximum) - } - } else { - goto notMinNorMax - } - } else { - goto notMinNorMax - } - } - notMinNorMax: - // Compute the union of all the input phis. // Often this will convey no information, because the block // is not dominated by its predecessors and hence the @@ -1923,32 +2037,6 @@ func (ft *factsTable) flowLimit(v *Value) bool { return false } -// See if we can get any facts because v is the result of signed mod by a constant. -// The mod operation has already been rewritten, so we have to try and reconstruct it. -// -// x % d -// -// is rewritten as -// -// x - (x / d) * d -// -// furthermore, the divide itself gets rewritten. If d is a power of 2 (d == 1<> k) << k -// = (x + adj) & (-1<>(w-1))>>>(w-k)) & (-1<> = signed shift, >>> = unsigned shift). - // See ./_gen/generic.rules, search for "Signed divide by power of 2". - - var w int64 - var addOp, andOp, constOp, sshiftOp, ushiftOp Op +// x%d has been rewritten to x - (x/d)*d. +func (ft *factsTable) detectMod(v *Value) bool { + var opDiv, opDivU, opMul, opConst Op switch v.Op { case OpSub64: - w = 64 - addOp = OpAdd64 - andOp = OpAnd64 - constOp = OpConst64 - sshiftOp = OpRsh64x64 - ushiftOp = OpRsh64Ux64 + opDiv = OpDiv64 + opDivU = OpDiv64u + opMul = OpMul64 + opConst = OpConst64 case OpSub32: - w = 32 - addOp = OpAdd32 - andOp = OpAnd32 - constOp = OpConst32 - sshiftOp = OpRsh32x64 - ushiftOp = OpRsh32Ux64 + opDiv = OpDiv32 + opDivU = OpDiv32u + opMul = OpMul32 + opConst = OpConst32 case OpSub16: - w = 16 - addOp = OpAdd16 - andOp = OpAnd16 - constOp = OpConst16 - sshiftOp = OpRsh16x64 - ushiftOp = OpRsh16Ux64 + opDiv = OpDiv16 + opDivU = OpDiv16u + opMul = OpMul16 + opConst = OpConst16 case OpSub8: - w = 8 - addOp = OpAdd8 - andOp = OpAnd8 - constOp = OpConst8 - sshiftOp = OpRsh8x64 - ushiftOp = OpRsh8Ux64 - default: - return false + opDiv = OpDiv8 + opDivU = OpDiv8u + opMul = OpMul8 + opConst = OpConst8 } - x := v.Args[0] - and := v.Args[1] - if and.Op != andOp { + mul := v.Args[1] + if mul.Op != opMul { return false } - var add, mask *Value - if and.Args[0].Op == addOp && and.Args[1].Op == constOp { - add = and.Args[0] - mask = and.Args[1] - } else if and.Args[1].Op == addOp && and.Args[0].Op == constOp { - add = and.Args[1] - mask = and.Args[0] - } else { - return false - } - var ushift *Value - if add.Args[0] == x { - ushift = add.Args[1] - } else if add.Args[1] == x { - ushift = add.Args[0] - } else { - return false - } - if ushift.Op != ushiftOp { - return false - } - if ushift.Args[1].Op != OpConst64 { - return false - } - k := w - ushift.Args[1].AuxInt // Now we know k! - d := int64(1) << k // divisor - sshift := ushift.Args[0] - if sshift.Op != sshiftOp { - return false - } - if sshift.Args[0] != x { - return false - } - if sshift.Args[1].Op != OpConst64 || sshift.Args[1].AuxInt != w-1 { - return false - } - if mask.AuxInt != -d { + div, con := mul.Args[0], mul.Args[1] + if div.Op == opConst { + div, con = con, div + } + if con.Op != opConst || (div.Op != opDiv && div.Op != opDivU) || div.Args[0] != v.Args[0] || div.Args[1].Op != opConst || div.Args[1].AuxInt != con.AuxInt { return false } + return ft.modLimit(div.Op == opDiv, v, v.Args[0], con) +} - // All looks ok. x % d is at most +/- d-1. 
- return ft.signedMinMax(v, -d+1, d-1) +// modLimit sets v with facts derived from v = p % q. +func (ft *factsTable) modLimit(signed bool, v, p, q *Value) bool { + a := ft.limits[p.ID] + b := ft.limits[q.ID] + if signed { + if a.min < 0 && b.min > 0 { + return ft.signedMinMax(v, -(b.max - 1), b.max-1) + } + if !(a.nonnegative() && b.nonnegative()) { + // TODO: we could handle signed limits but I didn't bother. + return false + } + if a.min >= 0 && b.min > 0 { + ft.setNonNegative(v) + } + } + // Underflow in the arithmetic below is ok, it wraps to MaxUint64 which does nothing to the limit. + return ft.unsignedMax(v, min(a.umax, b.umax-1)) } // getBranch returns the range restrictions added by p @@ -2308,63 +2358,53 @@ func checkForChunkedIndexBounds(ft *factsTable, b *Block, index, bound *Value, i } func addLocalFacts(ft *factsTable, b *Block) { - // Propagate constant ranges among values in this block. - // We do this before the second loop so that we have the - // most up-to-date constant bounds for isNonNegative calls. - for { - changed := false - for _, v := range b.Values { - changed = ft.flowLimit(v) || changed - } - if !changed { - break - } - } + ft.topoSortValuesInBlock(b) - // Add facts about individual operations. for _, v := range b.Values { - // FIXME(go.dev/issue/68857): this loop only set up limits properly when b.Values is in topological order. - // flowLimit can also depend on limits given by this loop which right now is not handled. + // Propagate constant ranges before relative relations to get + // the most up-to-date constant bounds for isNonNegative calls. + ft.flowLimit(v) + switch v.Op { case OpAdd64, OpAdd32, OpAdd16, OpAdd8: x := ft.limits[v.Args[0].ID] y := ft.limits[v.Args[1].ID] if !unsignedAddOverflows(x.umax, y.umax, v.Type) { r := gt - if !x.nonzero() { + if x.maybeZero() { r |= eq } ft.update(b, v, v.Args[1], unsigned, r) r = gt - if !y.nonzero() { + if y.maybeZero() { r |= eq } ft.update(b, v, v.Args[0], unsigned, r) } if x.min >= 0 && !signedAddOverflowsOrUnderflows(x.max, y.max, v.Type) { r := gt - if !x.nonzero() { + if x.maybeZero() { r |= eq } ft.update(b, v, v.Args[1], signed, r) } if y.min >= 0 && !signedAddOverflowsOrUnderflows(x.max, y.max, v.Type) { r := gt - if !y.nonzero() { + if y.maybeZero() { r |= eq } ft.update(b, v, v.Args[0], signed, r) } if x.max <= 0 && !signedAddOverflowsOrUnderflows(x.min, y.min, v.Type) { r := lt - if !x.nonzero() { + if x.maybeZero() { r |= eq } ft.update(b, v, v.Args[1], signed, r) } if y.max <= 0 && !signedAddOverflowsOrUnderflows(x.min, y.min, v.Type) { r := lt - if !y.nonzero() { + if y.maybeZero() { r |= eq } ft.update(b, v, v.Args[0], signed, r) @@ -2374,7 +2414,7 @@ func addLocalFacts(ft *factsTable, b *Block) { y := ft.limits[v.Args[1].ID] if !unsignedSubUnderflows(x.umin, y.umax) { r := lt - if !y.nonzero() { + if y.maybeZero() { r |= eq } ft.update(b, v, v.Args[0], unsigned, r) @@ -2393,12 +2433,56 @@ func addLocalFacts(ft *factsTable, b *Block) { // TODO: investigate how to always add facts without much slowdown, see issue #57959 //ft.update(b, v, v.Args[0], unsigned, gt|eq) //ft.update(b, v, v.Args[1], unsigned, gt|eq) + case OpDiv64, OpDiv32, OpDiv16, OpDiv8: + if ft.isNonNegative(v.Args[0]) && ft.isNonNegative(v.Args[1]) { + ft.update(b, v, v.Args[0], unsigned, lt|eq) + } case OpDiv64u, OpDiv32u, OpDiv16u, OpDiv8u, OpRsh8Ux64, OpRsh8Ux32, OpRsh8Ux16, OpRsh8Ux8, OpRsh16Ux64, OpRsh16Ux32, OpRsh16Ux16, OpRsh16Ux8, OpRsh32Ux64, OpRsh32Ux32, OpRsh32Ux16, OpRsh32Ux8, OpRsh64Ux64, OpRsh64Ux32, OpRsh64Ux16,
OpRsh64Ux8: + switch add := v.Args[0]; add.Op { + // round-up division pattern; given: + // v = (x + y) / z + // if y < z then v <= x + case OpAdd64, OpAdd32, OpAdd16, OpAdd8: + z := v.Args[1] + zl := ft.limits[z.ID] + var uminDivisor uint64 + switch v.Op { + case OpDiv64u, OpDiv32u, OpDiv16u, OpDiv8u: + uminDivisor = zl.umin + case OpRsh8Ux64, OpRsh8Ux32, OpRsh8Ux16, OpRsh8Ux8, + OpRsh16Ux64, OpRsh16Ux32, OpRsh16Ux16, OpRsh16Ux8, + OpRsh32Ux64, OpRsh32Ux32, OpRsh32Ux16, OpRsh32Ux8, + OpRsh64Ux64, OpRsh64Ux32, OpRsh64Ux16, OpRsh64Ux8: + uminDivisor = 1 << zl.umin + default: + panic("unreachable") + } + + x := add.Args[0] + xl := ft.limits[x.ID] + y := add.Args[1] + yl := ft.limits[y.ID] + if unsignedAddOverflows(xl.umax, yl.umax, add.Type) { + continue + } + + if xl.umax < uminDivisor { + ft.update(b, v, y, unsigned, lt|eq) + } + if yl.umax < uminDivisor { + ft.update(b, v, x, unsigned, lt|eq) + } + } ft.update(b, v, v.Args[0], unsigned, lt|eq) + case OpMod64, OpMod32, OpMod16, OpMod8: + if !ft.isNonNegative(v.Args[0]) || !ft.isNonNegative(v.Args[1]) { + break + } + fallthrough case OpMod64u, OpMod32u, OpMod16u, OpMod8u: ft.update(b, v, v.Args[0], unsigned, lt|eq) // Note: we have to be careful that this doesn't imply @@ -2513,20 +2597,66 @@ func addLocalFactsPhi(ft *factsTable, v *Value) { ft.update(b, v, y, dom, rel) } -var ctzNonZeroOp = map[Op]Op{OpCtz8: OpCtz8NonZero, OpCtz16: OpCtz16NonZero, OpCtz32: OpCtz32NonZero, OpCtz64: OpCtz64NonZero} +var ctzNonZeroOp = map[Op]Op{ + OpCtz8: OpCtz8NonZero, + OpCtz16: OpCtz16NonZero, + OpCtz32: OpCtz32NonZero, + OpCtz64: OpCtz64NonZero, +} var mostNegativeDividend = map[Op]int64{ OpDiv16: -1 << 15, OpMod16: -1 << 15, OpDiv32: -1 << 31, OpMod32: -1 << 31, OpDiv64: -1 << 63, - OpMod64: -1 << 63} + OpMod64: -1 << 63, +} +var unsignedOp = map[Op]Op{ + OpDiv8: OpDiv8u, + OpDiv16: OpDiv16u, + OpDiv32: OpDiv32u, + OpDiv64: OpDiv64u, + OpMod8: OpMod8u, + OpMod16: OpMod16u, + OpMod32: OpMod32u, + OpMod64: OpMod64u, +} + +var bytesizeToConst = [...]Op{ + 8 / 8: OpConst8, + 16 / 8: OpConst16, + 32 / 8: OpConst32, + 64 / 8: OpConst64, +} +var bytesizeToNeq = [...]Op{ + 8 / 8: OpNeq8, + 16 / 8: OpNeq16, + 32 / 8: OpNeq32, + 64 / 8: OpNeq64, +} +var bytesizeToAnd = [...]Op{ + 8 / 8: OpAnd8, + 16 / 8: OpAnd16, + 32 / 8: OpAnd32, + 64 / 8: OpAnd64, +} // simplifyBlock simplifies some constant values in b and evaluates // branches to non-uniquely dominated successors of b. func simplifyBlock(sdom SparseTree, ft *factsTable, b *Block) { - for _, v := range b.Values { + for iv, v := range b.Values { switch v.Op { + case OpStaticLECall: + if b.Func.pass.debug > 0 && len(v.Args) == 2 { + fn := auxToCall(v.Aux).Fn + if fn != nil && strings.Contains(fn.String(), "prove") { + // Print bounds of any argument to single-arg function with "prove" in name, + // for debugging and especially for test/prove.go. + // (v.Args[1] is mem). + x := v.Args[0] + b.Func.Warnl(v.Pos, "Proved %v (%v)", ft.limits[x.ID], x) + } + } case OpSlicemask: // Replace OpSlicemask operations in b with constants where possible. cap := v.Args[0] @@ -2576,32 +2706,8 @@ func simplifyBlock(sdom SparseTree, ft *factsTable, b *Block) { case OpRsh8x8, OpRsh8x16, OpRsh8x32, OpRsh8x64, OpRsh16x8, OpRsh16x16, OpRsh16x32, OpRsh16x64, OpRsh32x8, OpRsh32x16, OpRsh32x32, OpRsh32x64, - OpRsh64x8, OpRsh64x16, OpRsh64x32, OpRsh64x64: - // Check whether, for a >> b, we know that a is non-negative - // and b is all of a's bits except the MSB. If so, a is shifted to zero. 
- bits := 8 * v.Args[0].Type.Size() - if v.Args[1].isGenericIntConst() && v.Args[1].AuxInt >= bits-1 && ft.isNonNegative(v.Args[0]) { - if b.Func.pass.debug > 0 { - b.Func.Warnl(v.Pos, "Proved %v shifts to zero", v.Op) - } - switch bits { - case 64: - v.reset(OpConst64) - case 32: - v.reset(OpConst32) - case 16: - v.reset(OpConst16) - case 8: - v.reset(OpConst8) - default: - panic("unexpected integer size") - } - v.AuxInt = 0 - break // Be sure not to fallthrough - this is no longer OpRsh. - } - // If the Rsh hasn't been replaced with 0, still check if it is bounded. - fallthrough - case OpLsh8x8, OpLsh8x16, OpLsh8x32, OpLsh8x64, + OpRsh64x8, OpRsh64x16, OpRsh64x32, OpRsh64x64, + OpLsh8x8, OpLsh8x16, OpLsh8x32, OpLsh8x64, OpLsh16x8, OpLsh16x16, OpLsh16x32, OpLsh16x64, OpLsh32x8, OpLsh32x16, OpLsh32x32, OpLsh32x64, OpLsh64x8, OpLsh64x16, OpLsh64x32, OpLsh64x64, @@ -2620,30 +2726,85 @@ func simplifyBlock(sdom SparseTree, ft *factsTable, b *Block) { b.Func.Warnl(v.Pos, "Proved %v bounded", v.Op) } } - case OpDiv16, OpDiv32, OpDiv64, OpMod16, OpMod32, OpMod64: - // On amd64 and 386 fix-up code can be avoided if we know - // the divisor is not -1 or the dividend > MinIntNN. - // Don't modify AuxInt on other architectures, - // as that can interfere with CSE. - // TODO: add other architectures? - if b.Func.Config.arch != "386" && b.Func.Config.arch != "amd64" { + case OpDiv8, OpDiv16, OpDiv32, OpDiv64, OpMod8, OpMod16, OpMod32, OpMod64: + p, q := ft.limits[v.Args[0].ID], ft.limits[v.Args[1].ID] // p/q + if p.nonnegative() && q.nonnegative() { + if b.Func.pass.debug > 0 { + b.Func.Warnl(v.Pos, "Proved %v is unsigned", v.Op) + } + v.Op = unsignedOp[v.Op] + v.AuxInt = 0 break } - divr := v.Args[1] - divrLim := ft.limits[divr.ID] - divd := v.Args[0] - divdLim := ft.limits[divd.ID] - if divrLim.max < -1 || divrLim.min > -1 || divdLim.min > mostNegativeDividend[v.Op] { + // Fixup code can be avoided on x86 if we know + // the divisor is not -1 or the dividend > MinIntNN. + if v.Op != OpDiv8 && v.Op != OpMod8 && (q.max < -1 || q.min > -1 || p.min > mostNegativeDividend[v.Op]) { // See DivisionNeedsFixUp in rewrite.go. - // v.AuxInt = 1 means we have proved both that the divisor is not -1 - // and that the dividend is not the most negative integer, + // v.AuxInt = 1 means we have proved that the divisor is not -1 + // or that the dividend is not the most negative integer, // so we do not need to add fix-up code. - v.AuxInt = 1 if b.Func.pass.debug > 0 { b.Func.Warnl(v.Pos, "Proved %v does not need fix-up", v.Op) } + // Only usable on amd64 and 386, and only for ≥ 16-bit ops. + // Don't modify AuxInt on other architectures, as that can interfere with CSE. + // (Print the debug info above always, so that test/prove.go can be + // checked on non-x86 systems.) + // TODO: add other architectures? + if b.Func.Config.arch == "386" || b.Func.Config.arch == "amd64" { + v.AuxInt = 1 + } + } + case OpMul64, OpMul32, OpMul16, OpMul8: + if vl := ft.limits[v.ID]; vl.min == vl.max || vl.umin == vl.umax { + // v is going to be constant folded away; don't "optimize" it. 
+ break + } + x := v.Args[0] + xl := ft.limits[x.ID] + y := v.Args[1] + yl := ft.limits[y.ID] + if xl.umin == xl.umax && isPowerOfTwo(int64(xl.umin)) || + xl.min == xl.max && isPowerOfTwo(xl.min) || + yl.umin == yl.umax && isPowerOfTwo(int64(yl.umin)) || + yl.min == yl.max && isPowerOfTwo(yl.min) { + // a 0/1 value times a power of two is better done as a shift + break + } + switch xOne, yOne := xl.umax <= 1, yl.umax <= 1; { + case xOne && yOne: + v.Op = bytesizeToAnd[v.Type.Size()] + if b.Func.pass.debug > 0 { + b.Func.Warnl(v.Pos, "Rewrote Mul %v into And", v) + } + case yOne && b.Func.Config.haveCondSelect: + x, y = y, x + fallthrough + case xOne && b.Func.Config.haveCondSelect: + if !canCondSelect(v, b.Func.Config.arch, nil) { + break + } + zero := b.Func.constVal(bytesizeToConst[v.Type.Size()], v.Type, 0, true) + ft.initLimitForNewValue(zero) + check := b.NewValue2(v.Pos, bytesizeToNeq[v.Type.Size()], types.Types[types.TBOOL], zero, x) + ft.initLimitForNewValue(check) + v.reset(OpCondSelect) + v.AddArg3(y, zero, check) + + // FIXME: workaround for go.dev/issues/76060 + // we need to schedule the Neq before the CondSelect even though + // scheduling is meaningless until we reach the schedule pass. + if b.Values[len(b.Values)-1] != check { + panic("unreachable; failed sanity check, new value isn't at the end of the block") + } + b.Values[iv], b.Values[len(b.Values)-1] = b.Values[len(b.Values)-1], b.Values[iv] + + if b.Func.pass.debug > 0 { + b.Func.Warnl(v.Pos, "Rewrote Mul %v into CondSelect; %v is bool", v, x) + } } } + // Fold provable constant results. // Helps in cases where we reuse a value after branching on its equality. for i, arg := range v.Args { @@ -2803,3 +2964,57 @@ func isCleanExt(v *Value) bool { } return false } + +func getDependencyScore(scores []uint, v *Value) (score uint) { + if score = scores[v.ID]; score != 0 { + return score + } + defer func() { + scores[v.ID] = score + }() + if v.Op == OpPhi { + return 1 + } + score = 2 // NIT(@Jorropo): always order phis first to make GOSSAFUNC pretty. + for _, a := range v.Args { + if a.Block != v.Block { + continue + } + score = max(score, getDependencyScore(scores, a)+1) + } + return score +} + +// topoSortValuesInBlock ensures that ranging over b.Values visits values before they are used. +// It does not consider dependencies with other blocks; thus Phi nodes are considered to have no dependencies. +// The result is always deterministic and does not depend on the previous slice ordering.
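// Editor's sketch (assumption, not from the CL) of why this ordering matters
// to addLocalFacts: flowLimit reads the limits of a value's same-block
// arguments, so definitions must be visited before uses. Given block values
// stored as {v3, v2, v1} below, v3's range would be derived from a not yet
// initialized limit for v2; sorting by dependency score restores v1, v2, v3.
//
//	v1 = Const64 <int> [3]
//	v2 = Add64 <int> v1 v1
//	v3 = Mul64 <int> v2 v2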
+func (ft *factsTable) topoSortValuesInBlock(b *Block) { + f := b.Func + want := f.NumValues() + + scores := ft.reusedTopoSortScoresTable + if len(scores) < want { + if want <= cap(scores) { + scores = scores[:want] + } else { + if cap(scores) > 0 { + f.Cache.freeUintSlice(scores) + } + scores = f.Cache.allocUintSlice(want) + ft.reusedTopoSortScoresTable = scores + } + } + + for _, v := range b.Values { + scores[v.ID] = 0 // sentinel + } + + slices.SortFunc(b.Values, func(a, b *Value) int { + dependencyScoreA := getDependencyScore(scores, a) + dependencyScoreB := getDependencyScore(scores, b) + if dependencyScoreA != dependencyScoreB { + return cmp.Compare(dependencyScoreA, dependencyScoreB) + } + return cmp.Compare(a.ID, b.ID) + }) +} diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index bcb5dec09d3..4d022555b7b 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -119,10 +119,12 @@ import ( "cmd/compile/internal/types" "cmd/internal/src" "cmd/internal/sys" + "cmp" "fmt" "internal/buildcfg" "math" "math/bits" + "slices" "unsafe" ) @@ -1021,9 +1023,11 @@ func (s *regAllocState) regalloc(f *Func) { // Initialize regValLiveSet and uses fields for this block. // Walk backwards through the block doing liveness analysis. regValLiveSet.clear() - for _, e := range s.live[b.ID] { - s.addUse(e.ID, int32(len(b.Values))+e.dist, e.pos) // pseudo-uses from beyond end of block - regValLiveSet.add(e.ID) + if s.live != nil { + for _, e := range s.live[b.ID] { + s.addUse(e.ID, int32(len(b.Values))+e.dist, e.pos) // pseudo-uses from beyond end of block + regValLiveSet.add(e.ID) + } } for _, v := range b.ControlValues() { if s.values[v.ID].needReg { @@ -1343,7 +1347,9 @@ func (s *regAllocState) regalloc(f *Func) { } // Load static desired register info at the end of the block. - desired.copy(&s.desired[b.ID]) + if s.desired != nil { + desired.copy(&s.desired[b.ID]) + } // Check actual assigned registers at the start of the next block(s). // Dynamically assigned registers will trump the static @@ -1385,7 +1391,7 @@ func (s *regAllocState) regalloc(f *Func) { } } // Walk values backwards computing desired register info. - // See computeLive for more comments. + // See computeDesired for more comments. for i := len(oldSched) - 1; i >= 0; i-- { v := oldSched[i] prefs := desired.remove(v.ID) @@ -2059,8 +2065,10 @@ func (s *regAllocState) regalloc(f *Func) { if checkEnabled { regValLiveSet.clear() - for _, x := range s.live[b.ID] { - regValLiveSet.add(x.ID) + if s.live != nil { + for _, x := range s.live[b.ID] { + regValLiveSet.add(x.ID) + } } for r := register(0); r < s.numRegs; r++ { v := s.regs[r].v @@ -2077,37 +2085,39 @@ func (s *regAllocState) regalloc(f *Func) { // isn't in a register, generate a use for the spill location. // We need to remember this information so that // the liveness analysis in stackalloc is correct. - for _, e := range s.live[b.ID] { - vi := &s.values[e.ID] - if vi.regs != 0 { - // in a register, we'll use that source for the merge. - continue + if s.live != nil { + for _, e := range s.live[b.ID] { + vi := &s.values[e.ID] + if vi.regs != 0 { + // in a register, we'll use that source for the merge. + continue + } + if vi.rematerializeable { + // we'll rematerialize during the merge. 
+					continue
+				}
+				if s.f.pass.debug > regDebug {
+					fmt.Printf("live-at-end spill for %s at %s\n", s.orig[e.ID], b)
+				}
+				spill := s.makeSpill(s.orig[e.ID], b)
+				s.spillLive[b.ID] = append(s.spillLive[b.ID], spill.ID)
			}
-			if vi.rematerializeable {
-				// we'll rematerialize during the merge.
-				continue
-			}
-			if s.f.pass.debug > regDebug {
-				fmt.Printf("live-at-end spill for %s at %s\n", s.orig[e.ID], b)
-			}
-			spill := s.makeSpill(s.orig[e.ID], b)
-			s.spillLive[b.ID] = append(s.spillLive[b.ID], spill.ID)
-		}
-		// Clear any final uses.
-		// All that is left should be the pseudo-uses added for values which
-		// are live at the end of b.
-		for _, e := range s.live[b.ID] {
-			u := s.values[e.ID].uses
-			if u == nil {
-				f.Fatalf("live at end, no uses v%d", e.ID)
+			// Clear any final uses.
+			// All that is left should be the pseudo-uses added for values which
+			// are live at the end of b.
+			for _, e := range s.live[b.ID] {
+				u := s.values[e.ID].uses
+				if u == nil {
+					f.Fatalf("live at end, no uses v%d", e.ID)
+				}
+				if u.next != nil {
+					f.Fatalf("live at end, too many uses v%d", e.ID)
+				}
+				s.values[e.ID].uses = nil
+				u.next = s.freeUseRecords
+				s.freeUseRecords = u
			}
-			if u.next != nil {
-				f.Fatalf("live at end, too many uses v%d", e.ID)
-			}
-			s.values[e.ID].uses = nil
-			u.next = s.freeUseRecords
-			s.freeUseRecords = u
		}

		// allocReg may have dropped registers from startRegsMask that
@@ -2207,8 +2217,8 @@ func (s *regAllocState) placeSpills() {
		best := v.Block
		bestArg := v
		var bestDepth int16
-		if l := s.loopnest.b2l[best.ID]; l != nil {
-			bestDepth = l.depth
+		if s.loopnest != nil && s.loopnest.b2l[best.ID] != nil {
+			bestDepth = s.loopnest.b2l[best.ID].depth
		}
		b := best
		const maxSpillSearch = 100
@@ -2230,8 +2240,8 @@ func (s *regAllocState) placeSpills() {
			}
			var depth int16
-			if l := s.loopnest.b2l[b.ID]; l != nil {
-				depth = l.depth
+			if s.loopnest != nil && s.loopnest.b2l[b.ID] != nil {
+				depth = s.loopnest.b2l[b.ID].depth
			}
			if depth > bestDepth {
				// Don't push the spill into a deeper loop.
@@ -2811,47 +2821,82 @@ type liveInfo struct {
// computeLive computes a map from block ID to a list of value IDs live at the end
// of that block. Together with the value ID is a count of how many instructions
// to the next use of that value. The resulting map is stored in s.live.
-// computeLive also computes the desired register information at the end of each block.
-// This desired register information is stored in s.desired.
-// TODO: this could be quadratic if lots of variables are live across lots of
-// basic blocks. Figure out a way to make this function (or, more precisely, the user
-// of this function) require only linear size & time.
func (s *regAllocState) computeLive() {
	f := s.f
+	// Single-block functions do not have variables that are live across
+	// branches.
+	if len(f.Blocks) == 1 {
+		return
+	}
+
+	po := f.postorder()
	s.live = make([][]liveInfo, f.NumBlocks())
	s.desired = make([]desiredState, f.NumBlocks())
-	var phis []*Value
+	s.loopnest = f.loopnest()
+
+	rematIDs := make([]ID, 0, 64)
	live := f.newSparseMapPos(f.NumValues())
	defer f.retSparseMapPos(live)
	t := f.newSparseMapPos(f.NumValues())
	defer f.retSparseMapPos(t)
-	// Keep track of which value we want in each register.
-	var desired desiredState
-
-	// Instead of iterating over f.Blocks, iterate over their postordering.
-	// Liveness information flows backward, so starting at the end
-	// increases the probability that we will stabilize quickly.
-	// TODO: Do a better job yet.
Here's one possibility:
-	// Calculate the dominator tree and locate all strongly connected components.
-	// If a value is live in one block of an SCC, it is live in all.
-	// Walk the dominator tree from end to beginning, just once, treating SCC
-	// components as single blocks, duplicated calculated liveness information
-	// out to all of them.
-	po := f.postorder()
-	s.loopnest = f.loopnest()
	s.loopnest.computeUnavoidableCalls()
+
+	// Liveness analysis.
+	// This is an adapted version of the algorithm described in chapter 2.4.2
+	// of Fabrice Rastello's On Sparse Intermediate Representations.
+	// https://web.archive.org/web/20240417212122if_/https://inria.hal.science/hal-00761555/file/habilitation.pdf#section.50
+	//
+	// For our implementation, we fall back to a traditional iterative algorithm when we encounter
+	// irreducible CFGs. They are very uncommon in Go code because they need to be constructed with
+	// gotos, and our current loopnest definition does not compute all the information that
+	// we'd need to compute the loop ancestors for that step of the algorithm.
+	//
+	// Additionally, instead of only considering non-loop successors in the initial DFS phase,
+	// we compute the liveout as the union of all successors. This larger liveout set is still a subset
+	// of the final liveout for the block, and adding this information in the DFS phase means that
+	// we get slightly more accurate distance information.
+	var loopLiveIn map[*loop][]liveInfo
+	var numCalls []int32
+	if len(s.loopnest.loops) > 0 && !s.loopnest.hasIrreducible {
+		loopLiveIn = make(map[*loop][]liveInfo)
+		numCalls = f.Cache.allocInt32Slice(f.NumBlocks())
+		defer f.Cache.freeInt32Slice(numCalls)
+	}
+
	for {
		changed := false
		for _, b := range po {
			// Start with known live values at the end of the block.
-			// Add len(b.Values) to adjust from end-of-block distance
-			// to beginning-of-block distance.
			live.clear()
			for _, e := range s.live[b.ID] {
-				live.set(e.ID, e.dist+int32(len(b.Values)), e.pos)
+				live.set(e.ID, e.dist, e.pos)
+			}
+			update := false
+			// Arguments to phi nodes are live at this block's exit.
+			for _, e := range b.Succs {
+				succ := e.b
+				delta := branchDistance(b, succ)
+				for _, v := range succ.Values {
+					if v.Op != OpPhi {
+						break
+					}
+					arg := v.Args[e.i]
+					if s.values[arg.ID].needReg && (!live.contains(arg.ID) || delta < live.get(arg.ID)) {
+						live.set(arg.ID, delta, v.Pos)
+						update = true
+					}
+				}
+			}
+			if update {
+				s.live[b.ID] = updateLive(live, s.live[b.ID])
+			}
+			// Add len(b.Values) to adjust from end-of-block distance
+			// to beginning-of-block distance.
+			c := live.contents()
+			for i := range c {
+				c[i].val += int32(len(b.Values))
			}
			// Mark control values as live
@@ -2861,21 +2906,30 @@ func (s *regAllocState) computeLive() {
				}
			}
-			// Propagate backwards to the start of the block
-			// Assumes Values have been scheduled.
-			phis = phis[:0]
			for i := len(b.Values) - 1; i >= 0; i-- {
				v := b.Values[i]
				live.remove(v.ID)
				if v.Op == OpPhi {
-					// save phi ops for later
-					phis = append(phis, v)
					continue
				}
				if opcodeTable[v.Op].call {
+					if numCalls != nil {
+						numCalls[b.ID]++
+					}
+					rematIDs = rematIDs[:0]
					c := live.contents()
					for i := range c {
						c[i].val += unlikelyDistance
+						vid := c[i].key
+						if s.values[vid].rematerializeable {
+							rematIDs = append(rematIDs, vid)
+						}
+					}
+					// We don't spill rematerializeable values, and assuming they
+					// are live across a call would only force shuffle to add some
+					// (dead) constant rematerialization. Remove them.
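For intuition, a map-based sketch of the call-crossing adjustment described above (the actual removal loop follows just below); rematerializeable and unlikelyDistance here are stand-ins for the real s.values lookup and distance constant:

	// adjustAcrossCall penalizes values that stay live across a call and
	// drops rematerializeable ones, which are recreated at their next use
	// instead of being spilled.
	func adjustAcrossCall(live map[int]int32, rematerializeable func(int) bool, unlikelyDistance int32) {
		for id, d := range live {
			if rematerializeable(id) {
				delete(live, id)
				continue
			}
			live[id] = d + unlikelyDistance
		}
	}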
+					for _, r := range rematIDs {
+						live.remove(r)
					}
				}
				for _, a := range v.Args {
@@ -2884,7 +2938,207 @@ func (s *regAllocState) computeLive() {
				}
			}
		}
-		// Propagate desired registers backwards.
+		// If this is a loop header, save our live-in so that
+		// we can use it to fill in the loop bodies later.
+		if loopLiveIn != nil {
+			loop := s.loopnest.b2l[b.ID]
+			if loop != nil && loop.header.ID == b.ID {
+				loopLiveIn[loop] = updateLive(live, nil)
+			}
+		}
+		// For each predecessor of b, expand its list of live-at-end values.
+		// invariant: live contains the values live at the start of b
+		for _, e := range b.Preds {
+			p := e.b
+			delta := branchDistance(p, b)
+
+			// Start t off with the previously known live values at the end of p.
+			t.clear()
+			for _, e := range s.live[p.ID] {
+				t.set(e.ID, e.dist, e.pos)
+			}
+			update := false
+
+			// Add new live values from scanning this block.
+			for _, e := range live.contents() {
+				d := e.val + delta
+				if !t.contains(e.key) || d < t.get(e.key) {
+					update = true
+					t.set(e.key, d, e.pos)
+				}
+			}
+
+			if !update {
+				continue
+			}
+			s.live[p.ID] = updateLive(t, s.live[p.ID])
+			changed = true
+		}
+	}
+
+	// We are doing a traditional iterative algorithm and have run
+	// out of changes.
+	if !changed {
+		break
+	}
+
+	// We are doing a pre-pass and will fill in the liveness information
+	// later.
+	if loopLiveIn != nil {
+		break
+	}
+	// For loopless code, we have full liveness info after a single
+	// iteration.
+	if len(s.loopnest.loops) == 0 {
+		break
+	}
+}
+if f.pass.debug > regDebug {
+	s.debugPrintLive("after dfs walk", f, s.live, s.desired)
+}
+
+// Irreducible CFGs and functions without loops are already
+// done; compute their desired registers and return.
+if loopLiveIn == nil {
+	s.computeDesired()
+	return
+}
+
+// Walk the loopnest from outer to inner, adding
+// all live-in values from their parent. Instead of
+// a recursive algorithm, iterate in depth order.
+// TODO(dmo): can we permute the loopnest? can we avoid this copy?
+loops := slices.Clone(s.loopnest.loops)
+slices.SortFunc(loops, func(a, b *loop) int {
+	return cmp.Compare(a.depth, b.depth)
+})
+
+loopset := f.newSparseMapPos(f.NumValues())
+defer f.retSparseMapPos(loopset)
+for _, loop := range loops {
+	if loop.outer == nil {
+		continue
+	}
+	livein := loopLiveIn[loop]
+	loopset.clear()
+	for _, l := range livein {
+		loopset.set(l.ID, l.dist, l.pos)
+	}
+	update := false
+	for _, l := range loopLiveIn[loop.outer] {
+		if !loopset.contains(l.ID) {
+			loopset.set(l.ID, l.dist, l.pos)
+			update = true
+		}
+	}
+	if update {
+		loopLiveIn[loop] = updateLive(loopset, livein)
+	}
+}
+// unknownDistance is a sentinel value for when we know a variable
+// is live at any given block, but we do not yet know how far until its next
+// use. The distance will be computed later.
+const unknownDistance = -1
+
+// Add live-in values of the loop headers to their children.
+// This includes the loop headers themselves, since they can have values
+// that die in the middle of the block and aren't live-out.
+for _, b := range po {
+	loop := s.loopnest.b2l[b.ID]
+	if loop == nil {
+		continue
+	}
+	headerLive := loopLiveIn[loop]
+	loopset.clear()
+	for _, l := range s.live[b.ID] {
+		loopset.set(l.ID, l.dist, l.pos)
+	}
+	update := false
+	for _, l := range headerLive {
+		if !loopset.contains(l.ID) {
+			loopset.set(l.ID, unknownDistance, src.NoXPos)
+			update = true
+		}
+	}
+	if update {
+		s.live[b.ID] = updateLive(loopset, s.live[b.ID])
+	}
+}
+if f.pass.debug > regDebug {
+	s.debugPrintLive("after live loop prop", f, s.live, s.desired)
+}
+// Filling in liveness from loops leaves some blocks with no distance information.
+// Run over them and fill in the information from their successors.
+// To stabilize faster, we quit when no block has missing values, and we only
+// look at blocks that still have missing values in subsequent iterations.
+unfinishedBlocks := f.Cache.allocBlockSlice(len(po))
+defer f.Cache.freeBlockSlice(unfinishedBlocks)
+copy(unfinishedBlocks, po)
+
+for len(unfinishedBlocks) > 0 {
+	n := 0
+	for _, b := range unfinishedBlocks {
+		live.clear()
+		unfinishedValues := 0
+		for _, l := range s.live[b.ID] {
+			if l.dist == unknownDistance {
+				unfinishedValues++
+			}
+			live.set(l.ID, l.dist, l.pos)
+		}
+		update := false
+		for _, e := range b.Succs {
+			succ := e.b
+			for _, l := range s.live[succ.ID] {
+				if !live.contains(l.ID) || l.dist == unknownDistance {
+					continue
+				}
+				dist := int32(len(succ.Values)) + l.dist + branchDistance(b, succ)
+				dist += numCalls[succ.ID] * unlikelyDistance
+				val := live.get(l.ID)
+				switch {
+				case val == unknownDistance:
+					unfinishedValues--
+					fallthrough
+				case dist < val:
+					update = true
+					live.set(l.ID, dist, l.pos)
+				}
+			}
+		}
+		if update {
+			s.live[b.ID] = updateLive(live, s.live[b.ID])
+		}
+		if unfinishedValues > 0 {
+			unfinishedBlocks[n] = b
+			n++
+		}
+	}
+	unfinishedBlocks = unfinishedBlocks[:n]
+}

+s.computeDesired()
+
+if f.pass.debug > regDebug {
+	s.debugPrintLive("final", f, s.live, s.desired)
+}
+}
+
+// computeDesired computes the desired register information at the end of each block.
+// It is essentially a liveness analysis on machine registers instead of SSA values.
+// The desired register information is stored in s.desired.
+func (s *regAllocState) computeDesired() {
+
+	// TODO: Can we speed this up using the liveness information we have already
+	// from computeLive?
+	// TODO: Since we don't propagate information through phi nodes, can we do
+	// this as a single dominator tree walk instead of the iterative solution?
+	var desired desiredState
+	f := s.f
+	po := f.postorder()
+	for {
+		changed := false
+		for _, b := range po {
			desired.copy(&s.desired[b.ID])
			for i := len(b.Values) - 1; i >= 0; i-- {
				v := b.Values[i]
@@ -2919,106 +3173,85 @@ func (s *regAllocState) computeLive() {
					desired.addList(v.Args[0].ID, prefs)
				}
			}
-
-			// For each predecessor of b, expand its list of live-at-end values.
-			// invariant: live contains the values live at the start of b (excluding phi inputs)
-			for i, e := range b.Preds {
+			for _, e := range b.Preds {
				p := e.b
-				// Compute additional distance for the edge.
-				// Note: delta must be at least 1 to distinguish the control
-				// value use from the first user in a successor block.
-				delta := int32(normalDistance)
-				if len(p.Succs) == 2 {
-					if p.Succs[0].b == b && p.Likely == BranchLikely ||
-						p.Succs[1].b == b && p.Likely == BranchUnlikely {
-						delta = likelyDistance
-					}
-					if p.Succs[0].b == b && p.Likely == BranchUnlikely ||
-						p.Succs[1].b == b && p.Likely == BranchLikely {
-						delta = unlikelyDistance
-					}
-				}
-
-				// Update any desired registers at the end of p.
-				s.desired[p.ID].merge(&desired)
-
-				// Start t off with the previously known live values at the end of p.
-				t.clear()
-				for _, e := range s.live[p.ID] {
-					t.set(e.ID, e.dist, e.pos)
-				}
-				update := false
-
-				// Add new live values from scanning this block.
-				for _, e := range live.contents() {
-					d := e.val + delta
-					if !t.contains(e.key) || d < t.get(e.key) {
-						update = true
-						t.set(e.key, d, e.pos)
-					}
-				}
-				// Also add the correct arg from the saved phi values.
-				// All phis are at distance delta (we consider them
-				// simultaneously happening at the start of the block).
-				for _, v := range phis {
-					id := v.Args[i].ID
-					if s.values[id].needReg && (!t.contains(id) || delta < t.get(id)) {
-						update = true
-						t.set(id, delta, v.Pos)
-					}
-				}
-
-				if !update {
-					continue
-				}
-				// The live set has changed, update it.
-				l := s.live[p.ID][:0]
-				if cap(l) < t.size() {
-					l = make([]liveInfo, 0, t.size())
-				}
-				for _, e := range t.contents() {
-					l = append(l, liveInfo{e.key, e.val, e.pos})
-				}
-				s.live[p.ID] = l
-				changed = true
+				changed = s.desired[p.ID].merge(&desired) || changed
			}
		}
-
-		if !changed {
+		if !changed || (!s.loopnest.hasIrreducible && len(s.loopnest.loops) == 0) {
			break
		}
	}
-	if f.pass.debug > regDebug {
-		fmt.Println("live values at end of each block")
-		for _, b := range f.Blocks {
-			fmt.Printf(" %s:", b)
-			for _, x := range s.live[b.ID] {
-				fmt.Printf(" v%d(%d)", x.ID, x.dist)
-				for _, e := range s.desired[b.ID].entries {
-					if e.ID != x.ID {
-						continue
-					}
-					fmt.Printf("[")
-					first := true
-					for _, r := range e.regs {
-						if r == noRegister {
-							continue
-						}
-						if !first {
-							fmt.Printf(",")
-						}
-						fmt.Print(&s.registers[r])
-						first = false
-					}
-					fmt.Printf("]")
-				}
-			}
-			if avoid := s.desired[b.ID].avoid; avoid != 0 {
-				fmt.Printf(" avoid=%v", s.RegMaskString(avoid))
-			}
-			fmt.Println()
+}
+
+// updateLive updates a given liveInfo slice with the contents of t.
+func updateLive(t *sparseMapPos, live []liveInfo) []liveInfo {
+	live = live[:0]
+	if cap(live) < t.size() {
+		live = make([]liveInfo, 0, t.size())
+	}
+	for _, e := range t.contents() {
+		live = append(live, liveInfo{e.key, e.val, e.pos})
+	}
+	return live
+}
+
+// branchDistance calculates the distance between a block and a
+// successor in pseudo-instructions. This is used to indicate
+// branch likelihood.
+func branchDistance(b *Block, s *Block) int32 {
+	if len(b.Succs) == 2 {
+		if b.Succs[0].b == s && b.Likely == BranchLikely ||
+			b.Succs[1].b == s && b.Likely == BranchUnlikely {
+			return likelyDistance
+		}
+		if b.Succs[0].b == s && b.Likely == BranchUnlikely ||
+			b.Succs[1].b == s && b.Likely == BranchLikely {
+			return unlikelyDistance
+		}
+	}
+	// Note: the branch distance must be at least 1 to distinguish the control
+	// value use from the first user in a successor block.
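	// For intuition, a worked sketch (assuming the weights likelyDistance = 1
	// and unlikelyDistance = 100): a value next used 3 instructions into a
	// successor that has 5 values gets, per the dist computation earlier,
	//
	//	5 + 3 + likelyDistance   // == 9 over a likely edge
	//	5 + 3 + unlikelyDistance // == 108 over an unlikely edge
	//
	// so values on cold paths look far away and are spilled in preference
	// to values on hot paths.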
+ return normalDistance +} + +func (s *regAllocState) debugPrintLive(stage string, f *Func, live [][]liveInfo, desired []desiredState) { + fmt.Printf("%s: live values at end of each block: %s\n", stage, f.Name) + for _, b := range f.Blocks { + s.debugPrintLiveBlock(b, live[b.ID], &desired[b.ID]) + } +} + +func (s *regAllocState) debugPrintLiveBlock(b *Block, live []liveInfo, desired *desiredState) { + fmt.Printf(" %s:", b) + slices.SortFunc(live, func(a, b liveInfo) int { + return cmp.Compare(a.ID, b.ID) + }) + for _, x := range live { + fmt.Printf(" v%d(%d)", x.ID, x.dist) + for _, e := range desired.entries { + if e.ID != x.ID { + continue + } + fmt.Printf("[") + first := true + for _, r := range e.regs { + if r == noRegister { + continue + } + if !first { + fmt.Printf(",") + } + fmt.Print(&s.registers[r]) + first = false + } + fmt.Printf("]") + } + } + if avoid := desired.avoid; avoid != 0 { + fmt.Printf(" avoid=%v", s.RegMaskString(avoid)) + } + fmt.Println() } // A desiredState represents desired register assignments. @@ -3134,14 +3367,17 @@ func (d *desiredState) remove(vid ID) [4]register { return [4]register{noRegister, noRegister, noRegister, noRegister} } -// merge merges another desired state x into d. -func (d *desiredState) merge(x *desiredState) { +// merge merges another desired state x into d. Returns whether the set has +// changed +func (d *desiredState) merge(x *desiredState) bool { + oldAvoid := d.avoid d.avoid |= x.avoid // There should only be a few desired registers, so // linear insert is ok. for _, e := range x.entries { d.addList(e.ID, e.regs) } + return oldAvoid != d.avoid } // computeUnavoidableCalls computes the containsUnavoidableCall fields in the loop nest. diff --git a/src/cmd/compile/internal/ssa/regalloc_test.go b/src/cmd/compile/internal/ssa/regalloc_test.go index 79f94da0114..12f5820f1ff 100644 --- a/src/cmd/compile/internal/ssa/regalloc_test.go +++ b/src/cmd/compile/internal/ssa/regalloc_test.go @@ -265,6 +265,48 @@ func TestClobbersArg1(t *testing.T) { } } +func TestNoRematerializeDeadConstant(t *testing.T) { + c := testConfigARM64(t) + f := c.Fun("b1", + Bloc("b1", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("addr", OpArg, c.config.Types.Int32.PtrTo(), 0, c.Temp(c.config.Types.Int32.PtrTo())), + Valu("const", OpARM64MOVDconst, c.config.Types.Int32, -1, nil), // Original constant + Valu("cmp", OpARM64CMPconst, types.TypeFlags, 0, nil, "const"), + Goto("b2"), + ), + Bloc("b2", + Valu("phi_mem", OpPhi, types.TypeMem, 0, nil, "mem", "callmem"), + Eq("cmp", "b6", "b3"), + ), + Bloc("b3", + Valu("call", OpARM64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "phi_mem"), + Valu("callmem", OpSelectN, types.TypeMem, 0, nil, "call"), + Eq("cmp", "b5", "b4"), + ), + Bloc("b4", // A block where we don't really need to rematerialize the constant -1 + Goto("b2"), + ), + Bloc("b5", + Valu("user", OpAMD64MOVQstore, types.TypeMem, 0, nil, "addr", "const", "callmem"), + Exit("user"), + ), + Bloc("b6", + Exit("phi_mem"), + ), + ) + + regalloc(f.f) + checkFunc(f.f) + + // Check that in block b4, there's no dead rematerialization of the constant -1 + for _, v := range f.blocks["b4"].Values { + if v.Op == OpARM64MOVDconst && v.AuxInt == -1 { + t.Errorf("constant -1 rematerialized in loop block b4: %s", v.LongString()) + } + } +} + func numSpills(b *Block) int { return numOps(b, OpStoreReg) } diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index 47f225c7aeb..07308973b15 100644 --- 
a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -57,11 +57,15 @@ func applyRewrite(f *Func, rb blockRewriter, rv valueRewriter, deadcode deadValu var iters int var states map[string]bool for { + if debug > 1 { + fmt.Printf("%s: iter %d\n", f.pass.name, iters) + } change := false deadChange := false for _, b := range f.Blocks { var b0 *Block if debug > 1 { + fmt.Printf("%s: start block\n", f.pass.name) b0 = new(Block) *b0 = *b b0.Succs = append([]Edge{}, b.Succs...) // make a new copy, not aliasing @@ -79,6 +83,9 @@ func applyRewrite(f *Func, rb blockRewriter, rv valueRewriter, deadcode deadValu } } for j, v := range b.Values { + if debug > 1 { + fmt.Printf("%s: consider %v\n", f.pass.name, v.LongString()) + } var v0 *Value if debug > 1 { v0 = new(Value) @@ -456,6 +463,26 @@ func isSameCall(aux Aux, name string) bool { return fn != nil && fn.String() == name } +func isMalloc(aux Aux) bool { + return isNewObject(aux) || isSpecializedMalloc(aux) +} + +func isNewObject(aux Aux) bool { + fn := aux.(*AuxCall).Fn + return fn != nil && fn.String() == "runtime.newobject" +} + +func isSpecializedMalloc(aux Aux) bool { + fn := aux.(*AuxCall).Fn + if fn == nil { + return false + } + name := fn.String() + return strings.HasPrefix(name, "runtime.mallocgcSmallNoScanSC") || + strings.HasPrefix(name, "runtime.mallocgcSmallScanNoHeaderSC") || + strings.HasPrefix(name, "runtime.mallocTiny") +} + // canLoadUnaligned reports if the architecture supports unaligned load operations. func canLoadUnaligned(c *Config) bool { return c.ctxt.Arch.Alignment == 1 @@ -706,7 +733,7 @@ func int32ToAuxInt(i int32) int64 { return int64(i) } func int64ToAuxInt(i int64) int64 { - return int64(i) + return i } func uint8ToAuxInt(i uint8) int64 { return int64(int8(i)) @@ -737,6 +764,7 @@ func arm64ConditionalParamsToAuxInt(v arm64ConditionalParams) int64 { i |= int64(v.cond) return i } + func flagConstantToAuxInt(x flagConstant) int64 { return int64(x) } @@ -1235,10 +1263,8 @@ func logRule(s string) { } ruleFile = w } - _, err := fmt.Fprintln(ruleFile, s) - if err != nil { - panic(err) - } + // Ignore errors in case of multiple processes fighting over the file. + fmt.Fprintln(ruleFile, s) } var ruleFile io.Writer @@ -1583,7 +1609,7 @@ func encodePPC64RotateMask(rotate, mask, nbits int64) int64 { mb, me = men, mbn } - return int64(me) | int64(mb<<8) | int64(rotate<<16) | int64(nbits<<24) + return int64(me) | int64(mb<<8) | rotate<<16 | nbits<<24 } // Merge (RLDICL [encoded] (SRDconst [s] x)) into (RLDICL [new_encoded] x) @@ -1692,7 +1718,7 @@ func mergePPC64AndSldi(m, s int64) int64 { func mergePPC64ClrlsldiSrw(sld, srw int64) int64 { mask_1 := uint64(0xFFFFFFFF >> uint(srw)) // for CLRLSLDI, it's more convenient to think of it as a mask left bits then rotate left. - mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(int64(sld))) + mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(sld)) // Rewrite mask to apply after the final left shift. mask_3 := (mask_1 & mask_2) << uint(GetPPC64Shiftsh(sld)) @@ -1704,7 +1730,7 @@ func mergePPC64ClrlsldiSrw(sld, srw int64) int64 { if uint64(uint32(mask_3)) != mask_3 || mask_3 == 0 { return 0 } - return encodePPC64RotateMask(int64(r_3), int64(mask_3), 32) + return encodePPC64RotateMask(r_3, int64(mask_3), 32) } // Test if a doubleword shift right feeding into a CLRLSLDI can be merged into RLWINM. 
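A sketch of the byte layout that encodePPC64RotateMask packs, matching the expression left after the redundant conversions were dropped above; the decode helper is hypothetical, for illustration only:

	// The encoded value packs four byte-sized fields:
	// nbits<<24 | rotate<<16 | mb<<8 | me.
	func encode(rotate, mb, me, nbits int64) int64 {
		return me | mb<<8 | rotate<<16 | nbits<<24
	}

	func decode(enc int64) (rotate, mb, me, nbits int64) {
		return enc >> 16 & 0xff, enc >> 8 & 0xff, enc & 0xff, enc >> 24 & 0xff
	}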
@@ -1712,7 +1738,7 @@ func mergePPC64ClrlsldiSrw(sld, srw int64) int64 { func mergePPC64ClrlsldiSrd(sld, srd int64) int64 { mask_1 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(srd) // for CLRLSLDI, it's more convenient to think of it as a mask left bits then rotate left. - mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(int64(sld))) + mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(sld)) // Rewrite mask to apply after the final left shift. mask_3 := (mask_1 & mask_2) << uint(GetPPC64Shiftsh(sld)) @@ -1729,7 +1755,7 @@ func mergePPC64ClrlsldiSrd(sld, srd int64) int64 { if v1&mask_3 != 0 { return 0 } - return encodePPC64RotateMask(int64(r_3&31), int64(mask_3), 32) + return encodePPC64RotateMask(r_3&31, int64(mask_3), 32) } // Test if a RLWINM feeding into a CLRLSLDI can be merged into RLWINM. Return @@ -2135,11 +2161,11 @@ func rewriteFixedLoad(v *Value, sym Sym, sb *Value, off int64) *Value { switch f.Sym.Name { case "Size_": v.reset(ptrSizedOpConst) - v.AuxInt = int64(t.Size()) + v.AuxInt = t.Size() return v case "PtrBytes": v.reset(ptrSizedOpConst) - v.AuxInt = int64(types.PtrDataSize(t)) + v.AuxInt = types.PtrDataSize(t) return v case "Hash": v.reset(OpConst32) @@ -2599,55 +2625,57 @@ func rewriteStructStore(v *Value) *Value { return mem } -// isDirectType reports whether v represents a type +// isDirectAndComparableType reports whether v represents a type // (a *runtime._type) whose value is stored directly in an -// interface (i.e., is pointer or pointer-like). -func isDirectType(v *Value) bool { - return isDirectType1(v) +// interface (i.e., is pointer or pointer-like) and is comparable. +func isDirectAndComparableType(v *Value) bool { + return isDirectAndComparableType1(v) } // v is a type -func isDirectType1(v *Value) bool { +func isDirectAndComparableType1(v *Value) bool { switch v.Op { case OpITab: - return isDirectType2(v.Args[0]) + return isDirectAndComparableType2(v.Args[0]) case OpAddr: lsym := v.Aux.(*obj.LSym) if ti := lsym.TypeInfo(); ti != nil { - return types.IsDirectIface(ti.Type.(*types.Type)) + t := ti.Type.(*types.Type) + return types.IsDirectIface(t) && types.IsComparable(t) } } return false } // v is an empty interface -func isDirectType2(v *Value) bool { +func isDirectAndComparableType2(v *Value) bool { switch v.Op { case OpIMake: - return isDirectType1(v.Args[0]) + return isDirectAndComparableType1(v.Args[0]) } return false } -// isDirectIface reports whether v represents an itab +// isDirectAndComparableIface reports whether v represents an itab // (a *runtime._itab) for a type whose value is stored directly -// in an interface (i.e., is pointer or pointer-like). -func isDirectIface(v *Value) bool { - return isDirectIface1(v, 9) +// in an interface (i.e., is pointer or pointer-like) and is comparable. 
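To see why comparability matters for the renamed predicates here: func values are pointer-shaped, so they are stored directly in interfaces, yet comparing them must panic at run time, and a rewrite keyed on directness alone could wrongly reduce such a comparison to a pointer compare. A small self-contained demonstration:

	package main

	import "fmt"

	func main() {
		var a, b any = func() {}, func() {}
		defer func() {
			// Prints: recovered: runtime error: comparing uncomparable type func()
			fmt.Println("recovered:", recover())
		}()
		fmt.Println(a == b) // func types are direct but not comparable
	}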
+func isDirectAndComparableIface(v *Value) bool { + return isDirectAndComparableIface1(v, 9) } // v is an itab -func isDirectIface1(v *Value, depth int) bool { +func isDirectAndComparableIface1(v *Value, depth int) bool { if depth == 0 { return false } switch v.Op { case OpITab: - return isDirectIface2(v.Args[0], depth-1) + return isDirectAndComparableIface2(v.Args[0], depth-1) case OpAddr: lsym := v.Aux.(*obj.LSym) if ii := lsym.ItabInfo(); ii != nil { - return types.IsDirectIface(ii.Type.(*types.Type)) + t := ii.Type.(*types.Type) + return types.IsDirectIface(t) && types.IsComparable(t) } case OpConstNil: // We can treat this as direct, because if the itab is @@ -2658,16 +2686,16 @@ func isDirectIface1(v *Value, depth int) bool { } // v is an interface -func isDirectIface2(v *Value, depth int) bool { +func isDirectAndComparableIface2(v *Value, depth int) bool { if depth == 0 { return false } switch v.Op { case OpIMake: - return isDirectIface1(v.Args[0], depth-1) + return isDirectAndComparableIface1(v.Args[0], depth-1) case OpPhi: for _, a := range v.Args { - if !isDirectIface2(a, depth-1) { + if !isDirectAndComparableIface2(a, depth-1) { return false } } diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go index 04954387106..be88dd3cdda 100644 --- a/src/cmd/compile/internal/ssa/rewrite386.go +++ b/src/cmd/compile/internal/ssa/rewrite386.go @@ -257,6 +257,9 @@ func rewriteValue386(v *Value) bool { case OpAdd32carry: v.Op = Op386ADDLcarry return true + case OpAdd32carrywithcarry: + v.Op = Op386ADCLcarry + return true case OpAdd32withcarry: v.Op = Op386ADCL return true diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index ef0bddc70a0..bf0e79de0bf 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -5,6 +5,7 @@ package ssa import "internal/buildcfg" import "math" import "cmd/internal/obj" +import "cmd/compile/internal/base" import "cmd/compile/internal/types" func rewriteValueAMD64(v *Value) bool { @@ -2970,11 +2971,9 @@ func rewriteValueAMD64(v *Value) bool { case OpCvt16toMask8x16: return rewriteValueAMD64_OpCvt16toMask8x16(v) case OpCvt32Fto32: - v.Op = OpAMD64CVTTSS2SL - return true + return rewriteValueAMD64_OpCvt32Fto32(v) case OpCvt32Fto64: - v.Op = OpAMD64CVTTSS2SQ - return true + return rewriteValueAMD64_OpCvt32Fto64(v) case OpCvt32Fto64F: v.Op = OpAMD64CVTSS2SD return true @@ -2989,14 +2988,12 @@ func rewriteValueAMD64(v *Value) bool { case OpCvt32toMask8x32: return rewriteValueAMD64_OpCvt32toMask8x32(v) case OpCvt64Fto32: - v.Op = OpAMD64CVTTSD2SL - return true + return rewriteValueAMD64_OpCvt64Fto32(v) case OpCvt64Fto32F: v.Op = OpAMD64CVTSD2SS return true case OpCvt64Fto64: - v.Op = OpAMD64CVTTSD2SQ - return true + return rewriteValueAMD64_OpCvt64Fto64(v) case OpCvt64to32F: v.Op = OpAMD64CVTSQ2SS return true @@ -62708,7 +62705,6 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types // match: (CondSelect x y (SETEQ cond)) // cond: (is64BitInt(t) || isPtr(t)) // result: (CMOVQEQ y x cond) @@ -63466,60 +63462,6 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { return true } // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 1 - // result: (CondSelect x y (MOVBQZX check)) - for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 1) { - break - } 
- v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) - return true - } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 2 - // result: (CondSelect x y (MOVWQZX check)) - for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 2) { - break - } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) - return true - } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 4 - // result: (CondSelect x y (MOVLQZX check)) - for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 4) { - break - } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) - return true - } - // match: (CondSelect x y check) // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t)) // result: (CMOVQNE y x (CMPQconst [0] check)) for { @@ -63573,6 +63515,168 @@ func rewriteValueAMD64_OpCondSelect(v *Value) bool { v.AddArg3(y, x, v0) return true } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 4 && (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNE y x (CMPLconst [0] check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 4 && (is64BitInt(t) || isPtr(t))) { + break + } + v.reset(OpAMD64CMOVQNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 4 && is32BitInt(t) + // result: (CMOVLNE y x (CMPLconst [0] check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 4 && is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 4 && is16BitInt(t) + // result: (CMOVWNE y x (CMPLconst [0] check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 4 && is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 2 && (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNE y x (CMPWconst [0] check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 2 && (is64BitInt(t) || isPtr(t))) { + break + } + v.reset(OpAMD64CMOVQNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v0.AuxInt = int16ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 2 && is32BitInt(t) + // result: (CMOVLNE y x (CMPWconst [0] check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 2 && 
is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v0.AuxInt = int16ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 2 && is16BitInt(t) + // result: (CMOVWNE y x (CMPWconst [0] check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 2 && is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v0.AuxInt = int16ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 1 && (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNE y x (CMPBconst [0] check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 1 && (is64BitInt(t) || isPtr(t))) { + break + } + v.reset(OpAMD64CMOVQNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 1 && is32BitInt(t) + // result: (CMOVLNE y x (CMPBconst [0] check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 1 && is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 1 && is16BitInt(t) + // result: (CMOVWNE y x (CMPBconst [0] check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 1 && is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) + return true + } return false } func rewriteValueAMD64_OpConst16(v *Value) bool { @@ -63884,6 +63988,98 @@ func rewriteValueAMD64_OpCvt16toMask8x16(v *Value) bool { return true } } +func rewriteValueAMD64_OpCvt32Fto32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Cvt32Fto32 x) + // cond: base.ConvertHash.MatchPos(v.Pos, nil) + // result: (XORL y (SARLconst [31] (ANDL y:(CVTTSS2SL x) (NOTL (MOVLf2i x))))) + for { + t := v.Type + x := v_0 + if !(base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(OpAMD64XORL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64SARLconst, t) + v0.AuxInt = int8ToAuxInt(31) + v1 := b.NewValue0(v.Pos, OpAMD64ANDL, t) + y := b.NewValue0(v.Pos, OpAMD64CVTTSS2SL, t) + y.AddArg(x) + v3 := b.NewValue0(v.Pos, OpAMD64NOTL, typ.Int32) + v4 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) + v4.AddArg(x) + v3.AddArg(v4) + v1.AddArg2(y, v3) + v0.AddArg(v1) + v.AddArg2(y, v0) + return true + } + // match: (Cvt32Fto32 x) + // cond: !base.ConvertHash.MatchPos(v.Pos, nil) + // result: (CVTTSS2SL x) + for { + t := v.Type + x := v_0 + if !(!base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(OpAMD64CVTTSS2SL) + v.Type = t + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpCvt32Fto64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: 
(Cvt32Fto64 x) + // cond: base.ConvertHash.MatchPos(v.Pos, nil) + // result: (XORQ y (SARQconst [63] (ANDQ y:(CVTTSS2SQ x) (NOTQ (MOVQf2i (CVTSS2SD x))) ))) + for { + t := v.Type + x := v_0 + if !(base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(OpAMD64XORQ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64SARQconst, t) + v0.AuxInt = int8ToAuxInt(63) + v1 := b.NewValue0(v.Pos, OpAMD64ANDQ, t) + y := b.NewValue0(v.Pos, OpAMD64CVTTSS2SQ, t) + y.AddArg(x) + v3 := b.NewValue0(v.Pos, OpAMD64NOTQ, typ.Int64) + v4 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) + v5 := b.NewValue0(v.Pos, OpAMD64CVTSS2SD, typ.Float64) + v5.AddArg(x) + v4.AddArg(v5) + v3.AddArg(v4) + v1.AddArg2(y, v3) + v0.AddArg(v1) + v.AddArg2(y, v0) + return true + } + // match: (Cvt32Fto64 x) + // cond: !base.ConvertHash.MatchPos(v.Pos, nil) + // result: (CVTTSS2SQ x) + for { + t := v.Type + x := v_0 + if !(!base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(OpAMD64CVTTSS2SQ) + v.Type = t + v.AddArg(x) + return true + } + return false +} func rewriteValueAMD64_OpCvt32toMask16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block @@ -63916,6 +64112,98 @@ func rewriteValueAMD64_OpCvt32toMask8x32(v *Value) bool { return true } } +func rewriteValueAMD64_OpCvt64Fto32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Cvt64Fto32 x) + // cond: base.ConvertHash.MatchPos(v.Pos, nil) + // result: (XORL y (SARLconst [31] (ANDL y:(CVTTSD2SL x) (NOTL (MOVLf2i (CVTSD2SS x)))))) + for { + t := v.Type + x := v_0 + if !(base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(OpAMD64XORL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64SARLconst, t) + v0.AuxInt = int8ToAuxInt(31) + v1 := b.NewValue0(v.Pos, OpAMD64ANDL, t) + y := b.NewValue0(v.Pos, OpAMD64CVTTSD2SL, t) + y.AddArg(x) + v3 := b.NewValue0(v.Pos, OpAMD64NOTL, typ.Int32) + v4 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) + v5 := b.NewValue0(v.Pos, OpAMD64CVTSD2SS, typ.Float32) + v5.AddArg(x) + v4.AddArg(v5) + v3.AddArg(v4) + v1.AddArg2(y, v3) + v0.AddArg(v1) + v.AddArg2(y, v0) + return true + } + // match: (Cvt64Fto32 x) + // cond: !base.ConvertHash.MatchPos(v.Pos, nil) + // result: (CVTTSD2SL x) + for { + t := v.Type + x := v_0 + if !(!base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(OpAMD64CVTTSD2SL) + v.Type = t + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpCvt64Fto64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Cvt64Fto64 x) + // cond: base.ConvertHash.MatchPos(v.Pos, nil) + // result: (XORQ y (SARQconst [63] (ANDQ y:(CVTTSD2SQ x) (NOTQ (MOVQf2i x))))) + for { + t := v.Type + x := v_0 + if !(base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(OpAMD64XORQ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64SARQconst, t) + v0.AuxInt = int8ToAuxInt(63) + v1 := b.NewValue0(v.Pos, OpAMD64ANDQ, t) + y := b.NewValue0(v.Pos, OpAMD64CVTTSD2SQ, t) + y.AddArg(x) + v3 := b.NewValue0(v.Pos, OpAMD64NOTQ, typ.Int64) + v4 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) + v4.AddArg(x) + v3.AddArg(v4) + v1.AddArg2(y, v3) + v0.AddArg(v1) + v.AddArg2(y, v0) + return true + } + // match: (Cvt64Fto64 x) + // cond: !base.ConvertHash.MatchPos(v.Pos, nil) + // result: (CVTTSD2SQ x) + for { + t := v.Type + x := v_0 + if !(!base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(OpAMD64CVTTSD2SQ) + v.Type = t + v.AddArg(x) + return true + } + return false +} func rewriteValueAMD64_OpCvt64toMask8x64(v *Value) bool { v_0 := 
v.Args[0] b := v.Block diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index 44380cf8f57..2a90e7b433b 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -446,6 +446,9 @@ func rewriteValueARM(v *Value) bool { case OpAdd32carry: v.Op = OpARMADDS return true + case OpAdd32carrywithcarry: + v.Op = OpARMADCS + return true case OpAdd32withcarry: v.Op = OpARMADC return true diff --git a/src/cmd/compile/internal/ssa/rewriteLOONG64.go b/src/cmd/compile/internal/ssa/rewriteLOONG64.go index 3fc57e9f497..4262d4e0fb7 100644 --- a/src/cmd/compile/internal/ssa/rewriteLOONG64.go +++ b/src/cmd/compile/internal/ssa/rewriteLOONG64.go @@ -2368,6 +2368,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool { v_0 := v.Args[0] b := v.Block config := b.Func.Config + typ := &b.Func.Config.Types // match: (MOVBUload [off] {sym} ptr (MOVBstore [off] {sym} ptr x _)) // result: (MOVBUreg x) for { @@ -2447,6 +2448,29 @@ func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool { v.AddArg3(ptr, idx, mem) return true } + // match: (MOVBUload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVBUloadidx ptr (SLLVconst [shift] idx) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDshiftLLV { + break + } + shift := auxIntToInt64(v_0.AuxInt) + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpLOONG64MOVBUloadidx) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLVconst, typ.Int64) + v0.AuxInt = int64ToAuxInt(shift) + v0.AddArg(idx) + v.AddArg3(ptr, v0, mem) + return true + } // match: (MOVBUload [off] {sym} (SB) _) // cond: symIsRO(sym) // result: (MOVVconst [int64(read8(sym, int64(off)))]) @@ -2675,6 +2699,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool { v_0 := v.Args[0] b := v.Block config := b.Func.Config + typ := &b.Func.Config.Types // match: (MOVBload [off] {sym} ptr (MOVBstore [off] {sym} ptr x _)) // result: (MOVBreg x) for { @@ -2754,6 +2779,29 @@ func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool { v.AddArg3(ptr, idx, mem) return true } + // match: (MOVBload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVBloadidx ptr (SLLVconst [shift] idx) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDshiftLLV { + break + } + shift := auxIntToInt64(v_0.AuxInt) + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpLOONG64MOVBloadidx) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLVconst, typ.Int64) + v0.AuxInt = int64ToAuxInt(shift) + v0.AddArg(idx) + v.AddArg3(ptr, v0, mem) + return true + } // match: (MOVBload [off] {sym} (SB) _) // cond: symIsRO(sym) // result: (MOVVconst [int64(int8(read8(sym, int64(off))))]) @@ -2880,6 +2928,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBstore(v *Value) bool { v_0 := v.Args[0] b := v.Block config := b.Func.Config + typ := &b.Func.Config.Types // match: (MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVBstore [off1+int32(off2)] {sym} ptr val mem) @@ -3047,6 +3096,30 @@ func rewriteValueLOONG64_OpLOONG64MOVBstore(v *Value) bool { v.AddArg4(ptr, idx, val, mem) return true } + // match: (MOVBstore [off] {sym} (ADDshiftLLV [shift] ptr idx) val mem) + // cond: off == 0 
&& sym == nil + // result: (MOVBstoreidx ptr (SLLVconst [shift] idx) val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDshiftLLV { + break + } + shift := auxIntToInt64(v_0.AuxInt) + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpLOONG64MOVBstoreidx) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLVconst, typ.Int64) + v0.AuxInt = int64ToAuxInt(shift) + v0.AddArg(idx) + v.AddArg4(ptr, v0, val, mem) + return true + } return false } func rewriteValueLOONG64_OpLOONG64MOVBstoreidx(v *Value) bool { @@ -3099,6 +3172,7 @@ func rewriteValueLOONG64_OpLOONG64MOVDload(v *Value) bool { v_0 := v.Args[0] b := v.Block config := b.Func.Config + typ := &b.Func.Config.Types // match: (MOVDload [off] {sym} ptr (MOVVstore [off] {sym} ptr val _)) // result: (MOVVgpfp val) for { @@ -3178,6 +3252,29 @@ func rewriteValueLOONG64_OpLOONG64MOVDload(v *Value) bool { v.AddArg3(ptr, idx, mem) return true } + // match: (MOVDload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVDloadidx ptr (SLLVconst [shift] idx) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDshiftLLV { + break + } + shift := auxIntToInt64(v_0.AuxInt) + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpLOONG64MOVDloadidx) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLVconst, typ.Int64) + v0.AuxInt = int64ToAuxInt(shift) + v0.AddArg(idx) + v.AddArg3(ptr, v0, mem) + return true + } return false } func rewriteValueLOONG64_OpLOONG64MOVDloadidx(v *Value) bool { @@ -3228,6 +3325,7 @@ func rewriteValueLOONG64_OpLOONG64MOVDstore(v *Value) bool { v_0 := v.Args[0] b := v.Block config := b.Func.Config + typ := &b.Func.Config.Types // match: (MOVDstore [off] {sym} ptr (MOVVgpfp val) mem) // result: (MOVVstore [off] {sym} ptr val mem) for { @@ -3310,6 +3408,30 @@ func rewriteValueLOONG64_OpLOONG64MOVDstore(v *Value) bool { v.AddArg4(ptr, idx, val, mem) return true } + // match: (MOVDstore [off] {sym} (ADDshiftLLV [shift] ptr idx) val mem) + // cond: off == 0 && sym == nil + // result: (MOVDstoreidx ptr (SLLVconst [shift] idx) val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDshiftLLV { + break + } + shift := auxIntToInt64(v_0.AuxInt) + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpLOONG64MOVDstoreidx) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLVconst, typ.Int64) + v0.AuxInt = int64ToAuxInt(shift) + v0.AddArg(idx) + v.AddArg4(ptr, v0, val, mem) + return true + } return false } func rewriteValueLOONG64_OpLOONG64MOVDstoreidx(v *Value) bool { @@ -3362,6 +3484,7 @@ func rewriteValueLOONG64_OpLOONG64MOVFload(v *Value) bool { v_0 := v.Args[0] b := v.Block config := b.Func.Config + typ := &b.Func.Config.Types // match: (MOVFload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) // result: (MOVWgpfp val) for { @@ -3441,6 +3564,29 @@ func rewriteValueLOONG64_OpLOONG64MOVFload(v *Value) bool { v.AddArg3(ptr, idx, mem) return true } + // match: (MOVFload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVFloadidx ptr (SLLVconst [shift] idx) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDshiftLLV { + break + } + shift := auxIntToInt64(v_0.AuxInt) + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem 
:= v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpLOONG64MOVFloadidx) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLVconst, typ.Int64) + v0.AuxInt = int64ToAuxInt(shift) + v0.AddArg(idx) + v.AddArg3(ptr, v0, mem) + return true + } return false } func rewriteValueLOONG64_OpLOONG64MOVFloadidx(v *Value) bool { @@ -3491,6 +3637,7 @@ func rewriteValueLOONG64_OpLOONG64MOVFstore(v *Value) bool { v_0 := v.Args[0] b := v.Block config := b.Func.Config + typ := &b.Func.Config.Types // match: (MOVFstore [off] {sym} ptr (MOVWgpfp val) mem) // result: (MOVWstore [off] {sym} ptr val mem) for { @@ -3573,6 +3720,30 @@ func rewriteValueLOONG64_OpLOONG64MOVFstore(v *Value) bool { v.AddArg4(ptr, idx, val, mem) return true } + // match: (MOVFstore [off] {sym} (ADDshiftLLV [shift] ptr idx) val mem) + // cond: off == 0 && sym == nil + // result: (MOVFstoreidx ptr (SLLVconst [shift] idx) val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDshiftLLV { + break + } + shift := auxIntToInt64(v_0.AuxInt) + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpLOONG64MOVFstoreidx) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLVconst, typ.Int64) + v0.AuxInt = int64ToAuxInt(shift) + v0.AddArg(idx) + v.AddArg4(ptr, v0, val, mem) + return true + } return false } func rewriteValueLOONG64_OpLOONG64MOVFstoreidx(v *Value) bool { @@ -3625,6 +3796,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHUload(v *Value) bool { v_0 := v.Args[0] b := v.Block config := b.Func.Config + typ := &b.Func.Config.Types // match: (MOVHUload [off] {sym} ptr (MOVHstore [off] {sym} ptr x _)) // result: (MOVHUreg x) for { @@ -3704,6 +3876,29 @@ func rewriteValueLOONG64_OpLOONG64MOVHUload(v *Value) bool { v.AddArg3(ptr, idx, mem) return true } + // match: (MOVHUload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVHUloadidx ptr (SLLVconst [shift] idx) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDshiftLLV { + break + } + shift := auxIntToInt64(v_0.AuxInt) + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpLOONG64MOVHUloadidx) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLVconst, typ.Int64) + v0.AuxInt = int64ToAuxInt(shift) + v0.AddArg(idx) + v.AddArg3(ptr, v0, mem) + return true + } // match: (MOVHUload [off] {sym} (SB) _) // cond: symIsRO(sym) // result: (MOVVconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))]) @@ -3894,6 +4089,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool { v_0 := v.Args[0] b := v.Block config := b.Func.Config + typ := &b.Func.Config.Types // match: (MOVHload [off] {sym} ptr (MOVHstore [off] {sym} ptr x _)) // result: (MOVHreg x) for { @@ -3973,6 +4169,29 @@ func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool { v.AddArg3(ptr, idx, mem) return true } + // match: (MOVHload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVHloadidx ptr (SLLVconst [shift] idx) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDshiftLLV { + break + } + shift := auxIntToInt64(v_0.AuxInt) + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpLOONG64MOVHloadidx) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLVconst, typ.Int64) + v0.AuxInt = int64ToAuxInt(shift) + v0.AddArg(idx) + v.AddArg3(ptr, v0, mem) + return true 
+ } // match: (MOVHload [off] {sym} (SB) _) // cond: symIsRO(sym) // result: (MOVVconst [int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))]) @@ -4165,6 +4384,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHstore(v *Value) bool { v_0 := v.Args[0] b := v.Block config := b.Func.Config + typ := &b.Func.Config.Types // match: (MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVHstore [off1+int32(off2)] {sym} ptr val mem) @@ -4298,6 +4518,30 @@ func rewriteValueLOONG64_OpLOONG64MOVHstore(v *Value) bool { v.AddArg4(ptr, idx, val, mem) return true } + // match: (MOVHstore [off] {sym} (ADDshiftLLV [shift] ptr idx) val mem) + // cond: off == 0 && sym == nil + // result: (MOVHstoreidx ptr (SLLVconst [shift] idx) val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDshiftLLV { + break + } + shift := auxIntToInt64(v_0.AuxInt) + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpLOONG64MOVHstoreidx) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLVconst, typ.Int64) + v0.AuxInt = int64ToAuxInt(shift) + v0.AddArg(idx) + v.AddArg4(ptr, v0, val, mem) + return true + } return false } func rewriteValueLOONG64_OpLOONG64MOVHstoreidx(v *Value) bool { @@ -4350,6 +4594,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVload(v *Value) bool { v_0 := v.Args[0] b := v.Block config := b.Func.Config + typ := &b.Func.Config.Types // match: (MOVVload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _)) // result: (MOVVfpgp val) for { @@ -4446,6 +4691,29 @@ func rewriteValueLOONG64_OpLOONG64MOVVload(v *Value) bool { v.AddArg3(ptr, idx, mem) return true } + // match: (MOVVload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVVloadidx ptr (SLLVconst [shift] idx) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDshiftLLV { + break + } + shift := auxIntToInt64(v_0.AuxInt) + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpLOONG64MOVVloadidx) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLVconst, typ.Int64) + v0.AuxInt = int64ToAuxInt(shift) + v0.AddArg(idx) + v.AddArg3(ptr, v0, mem) + return true + } // match: (MOVVload [off] {sym} (SB) _) // cond: symIsRO(sym) // result: (MOVVconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))]) @@ -4551,6 +4819,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVstore(v *Value) bool { v_0 := v.Args[0] b := v.Block config := b.Func.Config + typ := &b.Func.Config.Types // match: (MOVVstore [off] {sym} ptr (MOVVfpgp val) mem) // result: (MOVDstore [off] {sym} ptr val mem) for { @@ -4633,6 +4902,30 @@ func rewriteValueLOONG64_OpLOONG64MOVVstore(v *Value) bool { v.AddArg4(ptr, idx, val, mem) return true } + // match: (MOVVstore [off] {sym} (ADDshiftLLV [shift] ptr idx) val mem) + // cond: off == 0 && sym == nil + // result: (MOVVstoreidx ptr (SLLVconst [shift] idx) val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDshiftLLV { + break + } + shift := auxIntToInt64(v_0.AuxInt) + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpLOONG64MOVVstoreidx) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLVconst, typ.Int64) + v0.AuxInt = int64ToAuxInt(shift) + v0.AddArg(idx) + v.AddArg4(ptr, v0, val, mem) + return true + } return 
false } func rewriteValueLOONG64_OpLOONG64MOVVstoreidx(v *Value) bool { @@ -4784,6 +5077,29 @@ func rewriteValueLOONG64_OpLOONG64MOVWUload(v *Value) bool { v.AddArg3(ptr, idx, mem) return true } + // match: (MOVWUload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVWUloadidx ptr (SLLVconst [shift] idx) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDshiftLLV { + break + } + shift := auxIntToInt64(v_0.AuxInt) + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpLOONG64MOVWUloadidx) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLVconst, typ.Int64) + v0.AuxInt = int64ToAuxInt(shift) + v0.AddArg(idx) + v.AddArg3(ptr, v0, mem) + return true + } // match: (MOVWUload [off] {sym} (SB) _) // cond: symIsRO(sym) // result: (MOVVconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))]) @@ -5007,6 +5323,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool { v_0 := v.Args[0] b := v.Block config := b.Func.Config + typ := &b.Func.Config.Types // match: (MOVWload [off] {sym} ptr (MOVWstore [off] {sym} ptr x _)) // result: (MOVWreg x) for { @@ -5086,6 +5403,29 @@ func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool { v.AddArg3(ptr, idx, mem) return true } + // match: (MOVWload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVWloadidx ptr (SLLVconst [shift] idx) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDshiftLLV { + break + } + shift := auxIntToInt64(v_0.AuxInt) + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpLOONG64MOVWloadidx) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLVconst, typ.Int64) + v0.AuxInt = int64ToAuxInt(shift) + v0.AddArg(idx) + v.AddArg3(ptr, v0, mem) + return true + } // match: (MOVWload [off] {sym} (SB) _) // cond: symIsRO(sym) // result: (MOVVconst [int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))]) @@ -5333,6 +5673,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWstore(v *Value) bool { v_0 := v.Args[0] b := v.Block config := b.Func.Config + typ := &b.Func.Config.Types // match: (MOVWstore [off] {sym} ptr (MOVWfpgp val) mem) // result: (MOVFstore [off] {sym} ptr val mem) for { @@ -5449,6 +5790,30 @@ func rewriteValueLOONG64_OpLOONG64MOVWstore(v *Value) bool { v.AddArg4(ptr, idx, val, mem) return true } + // match: (MOVWstore [off] {sym} (ADDshiftLLV [shift] ptr idx) val mem) + // cond: off == 0 && sym == nil + // result: (MOVWstoreidx ptr (SLLVconst [shift] idx) val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDshiftLLV { + break + } + shift := auxIntToInt64(v_0.AuxInt) + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpLOONG64MOVWstoreidx) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLVconst, typ.Int64) + v0.AuxInt = int64ToAuxInt(shift) + v0.AddArg(idx) + v.AddArg4(ptr, v0, val, mem) + return true + } return false } func rewriteValueLOONG64_OpLOONG64MOVWstoreidx(v *Value) bool { diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go index fda02e64d19..ff696337ef8 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go @@ -6562,6 +6562,23 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool { v.AddArg2(x, y) return true } + // match: 
(Select0 (Add32carrywithcarry x y c)) + // result: (ADD c (ADD x y)) + for { + if v_0.Op != OpAdd32carrywithcarry { + break + } + t := v_0.Type + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpMIPSADD) + v.Type = t.FieldType(0) + v0 := b.NewValue0(v.Pos, OpMIPSADD, t.FieldType(0)) + v0.AddArg2(x, y) + v.AddArg2(c, v0) + return true + } // match: (Select0 (Sub32carry x y)) // result: (SUB x y) for { @@ -6759,6 +6776,29 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool { v.AddArg2(x, v0) return true } + // match: (Select1 (Add32carrywithcarry x y c)) + // result: (OR (SGTU x xy:(ADD x y)) (SGTU xy (ADD c xy))) + for { + if v_0.Op != OpAdd32carrywithcarry { + break + } + t := v_0.Type + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpMIPSOR) + v.Type = typ.Bool + v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) + xy := b.NewValue0(v.Pos, OpMIPSADD, t.FieldType(0)) + xy.AddArg2(x, y) + v0.AddArg2(x, xy) + v2 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpMIPSADD, t.FieldType(0)) + v3.AddArg2(c, xy) + v2.AddArg2(xy, v3) + v.AddArg2(v0, v2) + return true + } // match: (Select1 (Sub32carry x y)) // result: (SGTU (SUB x y) x) for { diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index 050ace83dea..2225aee9753 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -4125,18 +4125,19 @@ func rewriteValuePPC64_OpOffPtr(v *Value) bool { func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ADD l:(MULLD x y) z) + b := v.Block + // match: (ADD z l:(MULLD x y)) // cond: buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) - // result: (MADDLD x y z) + // result: (MADDLD x y z ) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - l := v_0 + z := v_0 + l := v_1 if l.Op != OpPPC64MULLD { continue } y := l.Args[1] x := l.Args[0] - z := v_1 if !(buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l)) { continue } @@ -4146,6 +4147,30 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { } break } + // match: (ADD z l:(MULLDconst [x] y)) + // cond: buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) + // result: (MADDLD (MOVDconst [int64(x)]) y z ) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + z := v_0 + l := v_1 + if l.Op != OpPPC64MULLDconst { + continue + } + mt := l.Type + x := auxIntToInt32(l.AuxInt) + y := l.Args[0] + if !(buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l)) { + continue + } + v.reset(OpPPC64MADDLD) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, mt) + v0.AuxInt = int64ToAuxInt(int64(x)) + v.AddArg3(v0, y, z) + return true + } + break + } // match: (ADD x (MOVDconst [c])) // cond: is32Bit(c) && !t.IsPtr() // result: (ADDconst [c] x) @@ -4239,6 +4264,52 @@ func rewriteValuePPC64_OpPPC64ADDE(v *Value) bool { } func rewriteValuePPC64_OpPPC64ADDconst(v *Value) bool { v_0 := v.Args[0] + b := v.Block + // match: (ADDconst [z] l:(MULLD x y)) + // cond: buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) + // result: (MADDLD x y (MOVDconst [int64(z)])) + for { + at := v.Type + z := auxIntToInt64(v.AuxInt) + l := v_0 + if l.Op != OpPPC64MULLD { + break + } + y := l.Args[1] + x := l.Args[0] + if !(buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l)) { + break + } + v.reset(OpPPC64MADDLD) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, at) + v0.AuxInt = int64ToAuxInt(int64(z)) + v.AddArg3(x, y, v0) + return true + } + // match: (ADDconst [z] l:(MULLDconst [x] 
y)) + // cond: buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) + // result: (MADDLD (MOVDconst [int64(x)]) y (MOVDconst [int64(z)])) + for { + at := v.Type + z := auxIntToInt64(v.AuxInt) + l := v_0 + if l.Op != OpPPC64MULLDconst { + break + } + mt := l.Type + x := auxIntToInt32(l.AuxInt) + y := l.Args[0] + if !(buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l)) { + break + } + v.reset(OpPPC64MADDLD) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, mt) + v0.AuxInt = int64ToAuxInt(int64(x)) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, at) + v1.AuxInt = int64ToAuxInt(int64(z)) + v.AddArg3(v0, y, v1) + return true + } // match: (ADDconst [c] (ADDconst [d] x)) // cond: is32Bit(c+d) // result: (ADDconst [c+d] x) diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go index a7b4cf1bc40..191c7b3d484 100644 --- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go +++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go @@ -4,6 +4,7 @@ package ssa import "internal/buildcfg" import "math" +import "math/bits" import "cmd/compile/internal/types" func rewriteValueRISCV64(v *Value) bool { @@ -137,11 +138,13 @@ func rewriteValueRISCV64(v *Value) bool { case OpConst32: return rewriteValueRISCV64_OpConst32(v) case OpConst32F: - return rewriteValueRISCV64_OpConst32F(v) + v.Op = OpRISCV64FMOVFconst + return true case OpConst64: return rewriteValueRISCV64_OpConst64(v) case OpConst64F: - return rewriteValueRISCV64_OpConst64F(v) + v.Op = OpRISCV64FMOVDconst + return true case OpConst8: return rewriteValueRISCV64_OpConst8(v) case OpConstBool: @@ -1098,20 +1101,6 @@ func rewriteValueRISCV64_OpConst32(v *Value) bool { return true } } -func rewriteValueRISCV64_OpConst32F(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (Const32F [val]) - // result: (FMVSX (MOVDconst [int64(math.Float32bits(val))])) - for { - val := auxIntToFloat32(v.AuxInt) - v.reset(OpRISCV64FMVSX) - v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(int64(math.Float32bits(val))) - v.AddArg(v0) - return true - } -} func rewriteValueRISCV64_OpConst64(v *Value) bool { // match: (Const64 [val]) // result: (MOVDconst [int64(val)]) @@ -1122,20 +1111,6 @@ func rewriteValueRISCV64_OpConst64(v *Value) bool { return true } } -func rewriteValueRISCV64_OpConst64F(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (Const64F [val]) - // result: (FMVDX (MOVDconst [int64(math.Float64bits(val))])) - for { - val := auxIntToFloat64(v.AuxInt) - v.reset(OpRISCV64FMVDX) - v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(int64(math.Float64bits(val))) - v.AddArg(v0) - return true - } -} func rewriteValueRISCV64_OpConst8(v *Value) bool { // match: (Const8 [val]) // result: (MOVDconst [int64(val)]) @@ -3608,20 +3583,16 @@ func rewriteValueRISCV64_OpRISCV64FEQD(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (FEQD x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(-1)))]))) - // result: (ANDI [1] (FCLASSD x)) + // match: (FEQD x (FMOVDconst [math.Inf(-1)])) + // result: (ANDI [0b00_0000_0001] (FCLASSD x)) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpRISCV64FMVDX { - continue - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != int64(math.Float64bits(math.Inf(-1))) { + if v_1.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != math.Inf(-1) { continue } v.reset(OpRISCV64ANDI) - 
v.AuxInt = int64ToAuxInt(1) + v.AuxInt = int64ToAuxInt(0b00_0000_0001) v0 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64) v0.AddArg(x) v.AddArg(v0) @@ -3629,21 +3600,17 @@ func rewriteValueRISCV64_OpRISCV64FEQD(v *Value) bool { } break } - // match: (FEQD x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(1)))]))) - // result: (SNEZ (ANDI [1<<7] (FCLASSD x))) + // match: (FEQD x (FMOVDconst [math.Inf(1)])) + // result: (SNEZ (ANDI [0b00_1000_0000] (FCLASSD x))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpRISCV64FMVDX { - continue - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != int64(math.Float64bits(math.Inf(1))) { + if v_1.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != math.Inf(1) { continue } v.reset(OpRISCV64SNEZ) v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64) - v0.AuxInt = int64ToAuxInt(1 << 7) + v0.AuxInt = int64ToAuxInt(0b00_1000_0000) v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64) v1.AddArg(x) v0.AddArg(v1) @@ -3659,40 +3626,64 @@ func rewriteValueRISCV64_OpRISCV64FLED(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (FLED (FMVDX (MOVDconst [int64(math.Float64bits(-math.MaxFloat64))])) x) - // result: (SNEZ (ANDI [0xff &^ 1] (FCLASSD x))) + // match: (FLED (FMOVDconst [-math.MaxFloat64]) x) + // result: (SNEZ (ANDI [0b00_1111_1110] (FCLASSD x))) for { - if v_0.Op != OpRISCV64FMVDX { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_0_0.AuxInt) != int64(math.Float64bits(-math.MaxFloat64)) { + if v_0.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_0.AuxInt) != -math.MaxFloat64 { break } x := v_1 v.reset(OpRISCV64SNEZ) v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64) - v0.AuxInt = int64ToAuxInt(0xff &^ 1) + v0.AuxInt = int64ToAuxInt(0b00_1111_1110) v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64) v1.AddArg(x) v0.AddArg(v1) v.AddArg(v0) return true } - // match: (FLED x (FMVDX (MOVDconst [int64(math.Float64bits(math.MaxFloat64))]))) - // result: (SNEZ (ANDI [0xff &^ (1<<7)] (FCLASSD x))) + // match: (FLED x (FMOVDconst [math.MaxFloat64])) + // result: (SNEZ (ANDI [0b00_0111_1111] (FCLASSD x))) for { x := v_0 - if v_1.Op != OpRISCV64FMVDX { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != int64(math.Float64bits(math.MaxFloat64)) { + if v_1.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != math.MaxFloat64 { break } v.reset(OpRISCV64SNEZ) v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64) - v0.AuxInt = int64ToAuxInt(0xff &^ (1 << 7)) + v0.AuxInt = int64ToAuxInt(0b00_0111_1111) + v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (FLED (FMOVDconst [+0x1p-1022]) x) + // result: (SNEZ (ANDI [0b00_1100_0000] (FCLASSD x))) + for { + if v_0.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_0.AuxInt) != +0x1p-1022 { + break + } + x := v_1 + v.reset(OpRISCV64SNEZ) + v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64) + v0.AuxInt = int64ToAuxInt(0b00_1100_0000) + v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (FLED x (FMOVDconst [-0x1p-1022])) + // result: (SNEZ (ANDI [0b00_0000_0011] (FCLASSD x))) + for { + x := v_0 + if v_1.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != -0x1p-1022 { + break + } + v.reset(OpRISCV64SNEZ) + v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, 
typ.Int64) + v0.AuxInt = int64ToAuxInt(0b00_0000_0011) v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64) v1.AddArg(x) v0.AddArg(v1) @@ -3706,38 +3697,62 @@ func rewriteValueRISCV64_OpRISCV64FLTD(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (FLTD x (FMVDX (MOVDconst [int64(math.Float64bits(-math.MaxFloat64))]))) - // result: (ANDI [1] (FCLASSD x)) + // match: (FLTD x (FMOVDconst [-math.MaxFloat64])) + // result: (ANDI [0b00_0000_0001] (FCLASSD x)) for { x := v_0 - if v_1.Op != OpRISCV64FMVDX { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != int64(math.Float64bits(-math.MaxFloat64)) { + if v_1.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != -math.MaxFloat64 { break } v.reset(OpRISCV64ANDI) - v.AuxInt = int64ToAuxInt(1) + v.AuxInt = int64ToAuxInt(0b00_0000_0001) v0 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64) v0.AddArg(x) v.AddArg(v0) return true } - // match: (FLTD (FMVDX (MOVDconst [int64(math.Float64bits(math.MaxFloat64))])) x) - // result: (SNEZ (ANDI [1<<7] (FCLASSD x))) + // match: (FLTD (FMOVDconst [math.MaxFloat64]) x) + // result: (SNEZ (ANDI [0b00_1000_0000] (FCLASSD x))) for { - if v_0.Op != OpRISCV64FMVDX { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_0_0.AuxInt) != int64(math.Float64bits(math.MaxFloat64)) { + if v_0.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_0.AuxInt) != math.MaxFloat64 { break } x := v_1 v.reset(OpRISCV64SNEZ) v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64) - v0.AuxInt = int64ToAuxInt(1 << 7) + v0.AuxInt = int64ToAuxInt(0b00_1000_0000) + v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (FLTD x (FMOVDconst [+0x1p-1022])) + // result: (SNEZ (ANDI [0b00_0011_1111] (FCLASSD x))) + for { + x := v_0 + if v_1.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != +0x1p-1022 { + break + } + v.reset(OpRISCV64SNEZ) + v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64) + v0.AuxInt = int64ToAuxInt(0b00_0011_1111) + v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (FLTD (FMOVDconst [-0x1p-1022]) x) + // result: (SNEZ (ANDI [0b00_1111_1100] (FCLASSD x))) + for { + if v_0.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_0.AuxInt) != -0x1p-1022 { + break + } + x := v_1 + v.reset(OpRISCV64SNEZ) + v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64) + v0.AuxInt = int64ToAuxInt(0b00_1111_1100) v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64) v1.AddArg(x) v0.AddArg(v1) @@ -4175,21 +4190,17 @@ func rewriteValueRISCV64_OpRISCV64FNED(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (FNED x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(-1)))]))) - // result: (SEQZ (ANDI [1] (FCLASSD x))) + // match: (FNED x (FMOVDconst [math.Inf(-1)])) + // result: (SEQZ (ANDI [0b00_0000_0001] (FCLASSD x))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpRISCV64FMVDX { - continue - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != int64(math.Float64bits(math.Inf(-1))) { + if v_1.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != math.Inf(-1) { continue } v.reset(OpRISCV64SEQZ) v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64) - v0.AuxInt = int64ToAuxInt(1) + v0.AuxInt = int64ToAuxInt(0b00_0000_0001) v1 := b.NewValue0(v.Pos, 
OpRISCV64FCLASSD, typ.Int64) v1.AddArg(x) v0.AddArg(v1) @@ -4198,21 +4209,17 @@ func rewriteValueRISCV64_OpRISCV64FNED(v *Value) bool { } break } - // match: (FNED x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(1)))]))) - // result: (SEQZ (ANDI [1<<7] (FCLASSD x))) + // match: (FNED x (FMOVDconst [math.Inf(1)])) + // result: (SEQZ (ANDI [0b00_1000_0000] (FCLASSD x))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpRISCV64FMVDX { - continue - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != int64(math.Float64bits(math.Inf(1))) { + if v_1.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != math.Inf(1) { continue } v.reset(OpRISCV64SEQZ) v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64) - v0.AuxInt = int64ToAuxInt(1 << 7) + v0.AuxInt = int64ToAuxInt(0b00_1000_0000) v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64) v1.AddArg(x) v0.AddArg(v1) @@ -7114,6 +7121,8 @@ func rewriteValueRISCV64_OpRISCV64RORW(v *Value) bool { } func rewriteValueRISCV64_OpRISCV64SEQZ(v *Value) bool { v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types // match: (SEQZ (NEG x)) // result: (SEQZ x) for { @@ -7147,6 +7156,56 @@ func rewriteValueRISCV64_OpRISCV64SEQZ(v *Value) bool { v.AddArg(x) return true } + // match: (SEQZ (ANDI [c] (FCLASSD (FNEGD x)))) + // result: (SEQZ (ANDI [(c&0b11_0000_0000)|int64(bits.Reverse8(uint8(c))&0b1111_1111)] (FCLASSD x))) + for { + if v_0.Op != OpRISCV64ANDI { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpRISCV64FCLASSD { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpRISCV64FNEGD { + break + } + x := v_0_0_0.Args[0] + v.reset(OpRISCV64SEQZ) + v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64) + v0.AuxInt = int64ToAuxInt((c & 0b11_0000_0000) | int64(bits.Reverse8(uint8(c))&0b1111_1111)) + v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (SEQZ (ANDI [c] (FCLASSD (FABSD x)))) + // result: (SEQZ (ANDI [(c&0b11_1111_0000)|int64(bits.Reverse8(uint8(c))&0b0000_1111)] (FCLASSD x))) + for { + if v_0.Op != OpRISCV64ANDI { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpRISCV64FCLASSD { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpRISCV64FABSD { + break + } + x := v_0_0_0.Args[0] + v.reset(OpRISCV64SEQZ) + v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64) + v0.AuxInt = int64ToAuxInt((c & 0b11_1111_0000) | int64(bits.Reverse8(uint8(c))&0b0000_1111)) + v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } return false } func rewriteValueRISCV64_OpRISCV64SLL(v *Value) bool { @@ -7405,6 +7464,8 @@ func rewriteValueRISCV64_OpRISCV64SLTU(v *Value) bool { } func rewriteValueRISCV64_OpRISCV64SNEZ(v *Value) bool { v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types // match: (SNEZ (NEG x)) // result: (SNEZ x) for { @@ -7438,6 +7499,56 @@ func rewriteValueRISCV64_OpRISCV64SNEZ(v *Value) bool { v.AddArg(x) return true } + // match: (SNEZ (ANDI [c] (FCLASSD (FNEGD x)))) + // result: (SNEZ (ANDI [(c&0b11_0000_0000)|int64(bits.Reverse8(uint8(c))&0b1111_1111)] (FCLASSD x))) + for { + if v_0.Op != OpRISCV64ANDI { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpRISCV64FCLASSD { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpRISCV64FNEGD { + break + } + x := v_0_0_0.Args[0] + 
v.reset(OpRISCV64SNEZ) + v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64) + v0.AuxInt = int64ToAuxInt((c & 0b11_0000_0000) | int64(bits.Reverse8(uint8(c))&0b1111_1111)) + v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (SNEZ (ANDI [c] (FCLASSD (FABSD x)))) + // result: (SNEZ (ANDI [(c&0b11_1111_0000)|int64(bits.Reverse8(uint8(c))&0b0000_1111)] (FCLASSD x))) + for { + if v_0.Op != OpRISCV64ANDI { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpRISCV64FCLASSD { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpRISCV64FABSD { + break + } + x := v_0_0_0.Args[0] + v.reset(OpRISCV64SNEZ) + v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64) + v0.AuxInt = int64ToAuxInt((c & 0b11_1111_0000) | int64(bits.Reverse8(uint8(c))&0b0000_1111)) + v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } return false } func rewriteValueRISCV64_OpRISCV64SRA(v *Value) bool { @@ -9998,6 +10109,50 @@ func rewriteBlockRISCV64(b *Block) bool { b.resetWithControl2(BlockRISCV64BGEU, y, v0) return true } + // match: (BEQZ (ANDI [c] (FCLASSD (FNEGD x))) yes no) + // result: (BEQZ (ANDI [(c&0b11_0000_0000)|int64(bits.Reverse8(uint8(c))&0b1111_1111)] (FCLASSD x)) yes no) + for b.Controls[0].Op == OpRISCV64ANDI { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpRISCV64FCLASSD { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpRISCV64FNEGD { + break + } + x := v_0_0_0.Args[0] + v0 := b.NewValue0(v_0.Pos, OpRISCV64ANDI, typ.Int64) + v0.AuxInt = int64ToAuxInt((c & 0b11_0000_0000) | int64(bits.Reverse8(uint8(c))&0b1111_1111)) + v1 := b.NewValue0(v_0.Pos, OpRISCV64FCLASSD, typ.Int64) + v1.AddArg(x) + v0.AddArg(v1) + b.resetWithControl(BlockRISCV64BEQZ, v0) + return true + } + // match: (BEQZ (ANDI [c] (FCLASSD (FABSD x))) yes no) + // result: (BEQZ (ANDI [(c&0b11_1111_0000)|int64(bits.Reverse8(uint8(c))&0b0000_1111)] (FCLASSD x)) yes no) + for b.Controls[0].Op == OpRISCV64ANDI { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpRISCV64FCLASSD { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpRISCV64FABSD { + break + } + x := v_0_0_0.Args[0] + v0 := b.NewValue0(v_0.Pos, OpRISCV64ANDI, typ.Int64) + v0.AuxInt = int64ToAuxInt((c & 0b11_1111_0000) | int64(bits.Reverse8(uint8(c))&0b0000_1111)) + v1 := b.NewValue0(v_0.Pos, OpRISCV64FCLASSD, typ.Int64) + v1.AddArg(x) + v0.AddArg(v1) + b.resetWithControl(BlockRISCV64BEQZ, v0) + return true + } case BlockRISCV64BGE: // match: (BGE (MOVDconst [0]) cond yes no) // result: (BLEZ cond yes no) @@ -10199,6 +10354,50 @@ func rewriteBlockRISCV64(b *Block) bool { b.resetWithControl2(BlockRISCV64BLTU, y, v0) return true } + // match: (BNEZ (ANDI [c] (FCLASSD (FNEGD x))) yes no) + // result: (BNEZ (ANDI [(c&0b11_0000_0000)|int64(bits.Reverse8(uint8(c))&0b1111_1111)] (FCLASSD x)) yes no) + for b.Controls[0].Op == OpRISCV64ANDI { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpRISCV64FCLASSD { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpRISCV64FNEGD { + break + } + x := v_0_0_0.Args[0] + v0 := b.NewValue0(v_0.Pos, OpRISCV64ANDI, typ.Int64) + v0.AuxInt = int64ToAuxInt((c & 0b11_0000_0000) | int64(bits.Reverse8(uint8(c))&0b1111_1111)) + v1 := b.NewValue0(v_0.Pos, OpRISCV64FCLASSD, typ.Int64) + v1.AddArg(x) + v0.AddArg(v1) + 
b.resetWithControl(BlockRISCV64BNEZ, v0) + return true + } + // match: (BNEZ (ANDI [c] (FCLASSD (FABSD x))) yes no) + // result: (BNEZ (ANDI [(c&0b11_1111_0000)|int64(bits.Reverse8(uint8(c))&0b0000_1111)] (FCLASSD x)) yes no) + for b.Controls[0].Op == OpRISCV64ANDI { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpRISCV64FCLASSD { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpRISCV64FABSD { + break + } + x := v_0_0_0.Args[0] + v0 := b.NewValue0(v_0.Pos, OpRISCV64ANDI, typ.Int64) + v0.AuxInt = int64ToAuxInt((c & 0b11_1111_0000) | int64(bits.Reverse8(uint8(c))&0b0000_1111)) + v1 := b.NewValue0(v_0.Pos, OpRISCV64FCLASSD, typ.Int64) + v1.AddArg(x) + v0.AddArg(v1) + b.resetWithControl(BlockRISCV64BNEZ, v0) + return true + } case BlockIf: // match: (If cond yes no) // result: (BNEZ (MOVBUreg cond) yes no) diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go index a164a6eee55..faba41b3e5e 100644 --- a/src/cmd/compile/internal/ssa/rewriteWasm.go +++ b/src/cmd/compile/internal/ssa/rewriteWasm.go @@ -48,6 +48,8 @@ func rewriteValueWasm(v *Value) bool { case OpAndB: v.Op = OpWasmI64And return true + case OpAvg64u: + return rewriteValueWasm_OpAvg64u(v) case OpBitLen16: return rewriteValueWasm_OpBitLen16(v) case OpBitLen32: @@ -228,6 +230,10 @@ func rewriteValueWasm(v *Value) bool { case OpGetClosurePtr: v.Op = OpWasmLoweredGetClosurePtr return true + case OpHmul64: + return rewriteValueWasm_OpHmul64(v) + case OpHmul64u: + return rewriteValueWasm_OpHmul64u(v) case OpInterCall: v.Op = OpWasmLoweredInterCall return true @@ -239,6 +245,8 @@ func rewriteValueWasm(v *Value) bool { case OpIsSliceInBounds: v.Op = OpWasmI64LeU return true + case OpLast: + return rewriteValueWasm_OpLast(v) case OpLeq16: return rewriteValueWasm_OpLeq16(v) case OpLeq16U: @@ -514,6 +522,10 @@ func rewriteValueWasm(v *Value) bool { return rewriteValueWasm_OpRsh8x64(v) case OpRsh8x8: return rewriteValueWasm_OpRsh8x8(v) + case OpSelect0: + return rewriteValueWasm_OpSelect0(v) + case OpSelect1: + return rewriteValueWasm_OpSelect1(v) case OpSignExt16to32: return rewriteValueWasm_OpSignExt16to32(v) case OpSignExt16to64: @@ -684,6 +696,27 @@ func rewriteValueWasm_OpAddr(v *Value) bool { return true } } +func rewriteValueWasm_OpAvg64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Avg64u x y) + // result: (I64Add (I64ShrU (I64Sub x y) (I64Const [1])) y) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64Add) + v0 := b.NewValue0(v.Pos, OpWasmI64ShrU, typ.Int64) + v1 := b.NewValue0(v.Pos, OpWasmI64Sub, typ.Int64) + v1.AddArg2(x, y) + v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v2.AuxInt = int64ToAuxInt(1) + v0.AddArg2(v1, v2) + v.AddArg2(v0, y) + return true + } +} func rewriteValueWasm_OpBitLen16(v *Value) bool { v_0 := v.Args[0] b := v.Block @@ -1162,6 +1195,108 @@ func rewriteValueWasm_OpEq8(v *Value) bool { return true } } +func rewriteValueWasm_OpHmul64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul64 x y) + // result: (Last x0: (ZeroExt32to64 x) x1: (I64ShrS x (I64Const [32])) y0: (ZeroExt32to64 y) y1: (I64ShrS y (I64Const [32])) x0y0: (I64Mul x0 y0) tt: (I64Add (I64Mul x1 y0) (I64ShrU x0y0 (I64Const [32]))) w1: (I64Add (I64Mul x0 y1) (ZeroExt32to64 tt)) w2: (I64ShrS tt (I64Const [32])) (I64Add (I64Add (I64Mul x1 y1) w2) (I64ShrS w1 (I64Const [32])))) + for { + t := v.Type + x := 
v_0 + y := v_1 + v.reset(OpLast) + v.Type = t + x0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + x0.AddArg(x) + x1 := b.NewValue0(v.Pos, OpWasmI64ShrS, typ.Int64) + v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v2.AuxInt = int64ToAuxInt(32) + x1.AddArg2(x, v2) + y0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + y0.AddArg(y) + y1 := b.NewValue0(v.Pos, OpWasmI64ShrS, typ.Int64) + y1.AddArg2(y, v2) + x0y0 := b.NewValue0(v.Pos, OpWasmI64Mul, typ.Int64) + x0y0.AddArg2(x0, y0) + tt := b.NewValue0(v.Pos, OpWasmI64Add, typ.Int64) + v7 := b.NewValue0(v.Pos, OpWasmI64Mul, typ.Int64) + v7.AddArg2(x1, y0) + v8 := b.NewValue0(v.Pos, OpWasmI64ShrU, typ.Int64) + v8.AddArg2(x0y0, v2) + tt.AddArg2(v7, v8) + w1 := b.NewValue0(v.Pos, OpWasmI64Add, typ.Int64) + v10 := b.NewValue0(v.Pos, OpWasmI64Mul, typ.Int64) + v10.AddArg2(x0, y1) + v11 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v11.AddArg(tt) + w1.AddArg2(v10, v11) + w2 := b.NewValue0(v.Pos, OpWasmI64ShrS, typ.Int64) + w2.AddArg2(tt, v2) + v13 := b.NewValue0(v.Pos, OpWasmI64Add, typ.Int64) + v14 := b.NewValue0(v.Pos, OpWasmI64Add, typ.Int64) + v15 := b.NewValue0(v.Pos, OpWasmI64Mul, typ.Int64) + v15.AddArg2(x1, y1) + v14.AddArg2(v15, w2) + v16 := b.NewValue0(v.Pos, OpWasmI64ShrS, typ.Int64) + v16.AddArg2(w1, v2) + v13.AddArg2(v14, v16) + v.AddArgs(x0, x1, y0, y1, x0y0, tt, w1, w2, v13) + return true + } +} +func rewriteValueWasm_OpHmul64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul64u x y) + // result: (Last x0: (ZeroExt32to64 x) x1: (I64ShrU x (I64Const [32])) y0: (ZeroExt32to64 y) y1: (I64ShrU y (I64Const [32])) w0: (I64Mul x0 y0) tt: (I64Add (I64Mul x1 y0) (I64ShrU w0 (I64Const [32]))) w1: (I64Add (I64Mul x0 y1) (ZeroExt32to64 tt)) w2: (I64ShrU tt (I64Const [32])) hi: (I64Add (I64Add (I64Mul x1 y1) w2) (I64ShrU w1 (I64Const [32])))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLast) + v.Type = t + x0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + x0.AddArg(x) + x1 := b.NewValue0(v.Pos, OpWasmI64ShrU, typ.Int64) + v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v2.AuxInt = int64ToAuxInt(32) + x1.AddArg2(x, v2) + y0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + y0.AddArg(y) + y1 := b.NewValue0(v.Pos, OpWasmI64ShrU, typ.Int64) + y1.AddArg2(y, v2) + w0 := b.NewValue0(v.Pos, OpWasmI64Mul, typ.Int64) + w0.AddArg2(x0, y0) + tt := b.NewValue0(v.Pos, OpWasmI64Add, typ.Int64) + v7 := b.NewValue0(v.Pos, OpWasmI64Mul, typ.Int64) + v7.AddArg2(x1, y0) + v8 := b.NewValue0(v.Pos, OpWasmI64ShrU, typ.Int64) + v8.AddArg2(w0, v2) + tt.AddArg2(v7, v8) + w1 := b.NewValue0(v.Pos, OpWasmI64Add, typ.Int64) + v10 := b.NewValue0(v.Pos, OpWasmI64Mul, typ.Int64) + v10.AddArg2(x0, y1) + v11 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v11.AddArg(tt) + w1.AddArg2(v10, v11) + w2 := b.NewValue0(v.Pos, OpWasmI64ShrU, typ.Int64) + w2.AddArg2(tt, v2) + hi := b.NewValue0(v.Pos, OpWasmI64Add, typ.Int64) + v14 := b.NewValue0(v.Pos, OpWasmI64Add, typ.Int64) + v15 := b.NewValue0(v.Pos, OpWasmI64Mul, typ.Int64) + v15.AddArg2(x1, y1) + v14.AddArg2(v15, w2) + v16 := b.NewValue0(v.Pos, OpWasmI64ShrU, typ.Int64) + v16.AddArg2(w1, v2) + hi.AddArg2(v14, v16) + v.AddArgs(x0, x1, y0, y1, w0, tt, w1, w2, hi) + return true + } +} func rewriteValueWasm_OpIsNonNil(v *Value) bool { v_0 := v.Args[0] b := v.Block @@ -1177,6 +1312,14 @@ func rewriteValueWasm_OpIsNonNil(v *Value) bool { return true } } +func rewriteValueWasm_OpLast(v *Value) bool { + // match: (Last ___) 
+ // result: v.Args[len(v.Args)-1] + for { + v.copyOf(v.Args[len(v.Args)-1]) + return true + } +} func rewriteValueWasm_OpLeq16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -3199,6 +3342,40 @@ func rewriteValueWasm_OpRsh8x8(v *Value) bool { return true } } +func rewriteValueWasm_OpSelect0(v *Value) bool { + v_0 := v.Args[0] + // match: (Select0 (Mul64uhilo x y)) + // result: (Hmul64u x y) + for { + t := v.Type + if v_0.Op != OpMul64uhilo { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpHmul64u) + v.Type = t + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueWasm_OpSelect1(v *Value) bool { + v_0 := v.Args[0] + // match: (Select1 (Mul64uhilo x y)) + // result: (I64Mul x y) + for { + if v_0.Op != OpMul64uhilo { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpWasmI64Mul) + v.AddArg2(x, y) + return true + } + return false +} func rewriteValueWasm_OpSignExt16to32(v *Value) bool { v_0 := v.Args[0] // match: (SignExt16to32 x:(I64Load16S _ _)) diff --git a/src/cmd/compile/internal/ssa/rewritedec64.go b/src/cmd/compile/internal/ssa/rewritedec64.go index 901dc758c30..a0388551b53 100644 --- a/src/cmd/compile/internal/ssa/rewritedec64.go +++ b/src/cmd/compile/internal/ssa/rewritedec64.go @@ -12,6 +12,8 @@ func rewriteValuedec64(v *Value) bool { return rewriteValuedec64_OpAnd64(v) case OpArg: return rewriteValuedec64_OpArg(v) + case OpAvg64u: + return rewriteValuedec64_OpAvg64u(v) case OpBitLen64: return rewriteValuedec64_OpBitLen64(v) case OpBswap64: @@ -27,10 +29,16 @@ func rewriteValuedec64(v *Value) bool { return true case OpEq64: return rewriteValuedec64_OpEq64(v) + case OpHmul64: + return rewriteValuedec64_OpHmul64(v) + case OpHmul64u: + return rewriteValuedec64_OpHmul64u(v) case OpInt64Hi: return rewriteValuedec64_OpInt64Hi(v) case OpInt64Lo: return rewriteValuedec64_OpInt64Lo(v) + case OpLast: + return rewriteValuedec64_OpLast(v) case OpLeq64: return rewriteValuedec64_OpLeq64(v) case OpLeq64U: @@ -57,6 +65,8 @@ func rewriteValuedec64(v *Value) bool { return rewriteValuedec64_OpLsh8x64(v) case OpMul64: return rewriteValuedec64_OpMul64(v) + case OpMul64uhilo: + return rewriteValuedec64_OpMul64uhilo(v) case OpNeg64: return rewriteValuedec64_OpNeg64(v) case OpNeq64: @@ -101,6 +111,10 @@ func rewriteValuedec64(v *Value) bool { return rewriteValuedec64_OpRsh8Ux64(v) case OpRsh8x64: return rewriteValuedec64_OpRsh8x64(v) + case OpSelect0: + return rewriteValuedec64_OpSelect0(v) + case OpSelect1: + return rewriteValuedec64_OpSelect1(v) case OpSignExt16to64: return rewriteValuedec64_OpSignExt16to64(v) case OpSignExt32to64: @@ -133,29 +147,33 @@ func rewriteValuedec64_OpAdd64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Add64 x y) - // result: (Int64Make (Add32withcarry (Int64Hi x) (Int64Hi y) (Select1 (Add32carry (Int64Lo x) (Int64Lo y)))) (Select0 (Add32carry (Int64Lo x) (Int64Lo y)))) + // match: (Add64 x y) + // result: (Last x0: (Int64Lo x) x1: (Int64Hi x) y0: (Int64Lo y) y1: (Int64Hi y) add: (Add32carry x0 y0) (Int64Make (Add32withcarry x1 y1 (Select1 add)) (Select0 add))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpInt64Make) - v0 := b.NewValue0(v.Pos, OpAdd32withcarry, typ.Int32) - v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) - v2.AddArg(y) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4 := b.NewValue0(v.Pos, OpAdd32carry, types.NewTuple(typ.UInt32, types.TypeFlags)) - v5 := b.NewValue0(v.Pos, OpInt64Lo, 
typ.UInt32) - v5.AddArg(x) - v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) - v6.AddArg(y) - v4.AddArg2(v5, v6) - v3.AddArg(v4) - v0.AddArg3(v1, v2, v3) - v7 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) - v7.AddArg(v4) - v.AddArg2(v0, v7) + v.reset(OpLast) + v.Type = t + x0 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + x0.AddArg(x) + x1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + x1.AddArg(x) + y0 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + y0.AddArg(y) + y1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + y1.AddArg(y) + add := b.NewValue0(v.Pos, OpAdd32carry, types.NewTuple(typ.UInt32, types.TypeFlags)) + add.AddArg2(x0, y0) + v5 := b.NewValue0(v.Pos, OpInt64Make, typ.UInt64) + v6 := b.NewValue0(v.Pos, OpAdd32withcarry, typ.UInt32) + v7 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v7.AddArg(add) + v6.AddArg3(x1, y1, v7) + v8 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) + v8.AddArg(add) + v5.AddArg2(v6, v8) + v.AddArg6(x0, x1, y0, y1, add, v5) return true } } @@ -268,6 +286,28 @@ func rewriteValuedec64_OpArg(v *Value) bool { } return false } +func rewriteValuedec64_OpAvg64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Avg64u x y) + // result: (Add64 (Rsh64Ux32 (Sub64 x y) (Const32 [1])) y) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpAdd64) + v0 := b.NewValue0(v.Pos, OpRsh64Ux32, t) + v1 := b.NewValue0(v.Pos, OpSub64, t) + v1.AddArg2(x, y) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int32ToAuxInt(1) + v0.AddArg2(v1, v2) + v.AddArg2(v0, y) + return true + } +} func rewriteValuedec64_OpBitLen64(v *Value) bool { v_0 := v.Args[0] b := v.Block @@ -430,6 +470,62 @@ func rewriteValuedec64_OpEq64(v *Value) bool { return true } } +func rewriteValuedec64_OpHmul64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul64 x y) + // result: (Last p: (Hmul64u x y) xSign: (Int64Make xs:(Rsh32x32 (Int64Hi x) (Const32 [31])) xs) ySign: (Int64Make ys:(Rsh32x32 (Int64Hi y) (Const32 [31])) ys) (Sub64 (Sub64 p (And64 xSign y)) (And64 ySign x))) + for { + x := v_0 + y := v_1 + v.reset(OpLast) + p := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64) + p.AddArg2(x, y) + xSign := b.NewValue0(v.Pos, OpInt64Make, typ.UInt64) + xs := b.NewValue0(v.Pos, OpRsh32x32, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v4.AuxInt = int32ToAuxInt(31) + xs.AddArg2(v3, v4) + xSign.AddArg2(xs, xs) + ySign := b.NewValue0(v.Pos, OpInt64Make, typ.UInt64) + ys := b.NewValue0(v.Pos, OpRsh32x32, typ.UInt32) + v7 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v7.AddArg(y) + ys.AddArg2(v7, v4) + ySign.AddArg2(ys, ys) + v8 := b.NewValue0(v.Pos, OpSub64, typ.Int64) + v9 := b.NewValue0(v.Pos, OpSub64, typ.Int64) + v10 := b.NewValue0(v.Pos, OpAnd64, typ.Int64) + v10.AddArg2(xSign, y) + v9.AddArg2(p, v10) + v11 := b.NewValue0(v.Pos, OpAnd64, typ.Int64) + v11.AddArg2(ySign, x) + v8.AddArg2(v9, v11) + v.AddArg4(p, xSign, ySign, v8) + return true + } +} +func rewriteValuedec64_OpHmul64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul64u x y) + // result: (Select0 (Mul64uhilo x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpMul64uhilo, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} func rewriteValuedec64_OpInt64Hi(v *Value) bool { v_0 := v.Args[0] // 
match: (Int64Hi (Int64Make hi _)) @@ -458,6 +554,14 @@ func rewriteValuedec64_OpInt64Lo(v *Value) bool { } return false } +func rewriteValuedec64_OpLast(v *Value) bool { + // match: (Last ___) + // result: v.Args[len(v.Args)-1] + for { + v.copyOf(v.Args[len(v.Args)-1]) + return true + } +} func rewriteValuedec64_OpLeq64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -1114,35 +1218,124 @@ func rewriteValuedec64_OpMul64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Mul64 x y) - // result: (Int64Make (Add32 (Mul32 (Int64Lo x) (Int64Hi y)) (Add32 (Mul32 (Int64Hi x) (Int64Lo y)) (Select0 (Mul32uhilo (Int64Lo x) (Int64Lo y))))) (Select1 (Mul32uhilo (Int64Lo x) (Int64Lo y)))) + // match: (Mul64 x y) + // result: (Last x0: (Int64Lo x) x1: (Int64Hi x) y0: (Int64Lo y) y1: (Int64Hi y) x0y0: (Mul32uhilo x0 y0) x0y0Hi: (Select0 x0y0) x0y0Lo: (Select1 x0y0) (Int64Make (Add32 x0y0Hi (Add32 (Mul32 x0 y1) (Mul32 x1 y0))) x0y0Lo)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpInt64Make) - v0 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) - v2.AddArg(x) - v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) - v3.AddArg(y) - v1.AddArg2(v2, v3) - v4 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v5 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) - v6.AddArg(x) - v7 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) - v7.AddArg(y) - v5.AddArg2(v6, v7) - v8 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) - v9 := b.NewValue0(v.Pos, OpMul32uhilo, types.NewTuple(typ.UInt32, typ.UInt32)) - v9.AddArg2(v2, v7) - v8.AddArg(v9) - v4.AddArg2(v5, v8) - v0.AddArg2(v1, v4) - v10 := b.NewValue0(v.Pos, OpSelect1, typ.UInt32) - v10.AddArg(v9) - v.AddArg2(v0, v10) + v.reset(OpLast) + v.Type = t + x0 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + x0.AddArg(x) + x1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + x1.AddArg(x) + y0 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + y0.AddArg(y) + y1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + y1.AddArg(y) + x0y0 := b.NewValue0(v.Pos, OpMul32uhilo, types.NewTuple(typ.UInt32, typ.UInt32)) + x0y0.AddArg2(x0, y0) + x0y0Hi := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) + x0y0Hi.AddArg(x0y0) + x0y0Lo := b.NewValue0(v.Pos, OpSelect1, typ.UInt32) + x0y0Lo.AddArg(x0y0) + v7 := b.NewValue0(v.Pos, OpInt64Make, typ.UInt64) + v8 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) + v9 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) + v10 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v10.AddArg2(x0, y1) + v11 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v11.AddArg2(x1, y0) + v9.AddArg2(v10, v11) + v8.AddArg2(x0y0Hi, v9) + v7.AddArg2(v8, x0y0Lo) + v.AddArgs(x0, x1, y0, y1, x0y0, x0y0Hi, x0y0Lo, v7) + return true + } +} +func rewriteValuedec64_OpMul64uhilo(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mul64uhilo x y) + // result: (Last x0: (Int64Lo x) x1: (Int64Hi x) y0: (Int64Lo y) y1: (Int64Hi y) x0y0: (Mul32uhilo x0 y0) x0y1: (Mul32uhilo x0 y1) x1y0: (Mul32uhilo x1 y0) x1y1: (Mul32uhilo x1 y1) x0y0Hi: (Select0 x0y0) x0y0Lo: (Select1 x0y0) x0y1Hi: (Select0 x0y1) x0y1Lo: (Select1 x0y1) x1y0Hi: (Select0 x1y0) x1y0Lo: (Select1 x1y0) x1y1Hi: (Select0 x1y1) x1y1Lo: (Select1 x1y1) w1a: (Add32carry x0y0Hi x0y1Lo) w2a: (Add32carrywithcarry x0y1Hi x1y0Hi (Select1 w1a)) w3a: (Add32withcarry x1y1Hi (Const32 [0]) (Select1 w2a)) w1b: (Add32carry x1y0Lo (Select0 w1a)) w2b: 
(Add32carrywithcarry x1y1Lo (Select0 w2a) (Select1 w1b)) w3b: (Add32withcarry w3a (Const32 [0]) (Select1 w2b)) (MakeTuple (Int64Make w3b (Select0 w2b)) (Int64Make (Select0 w1b) x0y0Lo))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLast) + v.Type = t + x0 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + x0.AddArg(x) + x1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + x1.AddArg(x) + y0 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + y0.AddArg(y) + y1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + y1.AddArg(y) + x0y0 := b.NewValue0(v.Pos, OpMul32uhilo, types.NewTuple(typ.UInt32, typ.UInt32)) + x0y0.AddArg2(x0, y0) + x0y1 := b.NewValue0(v.Pos, OpMul32uhilo, types.NewTuple(typ.UInt32, typ.UInt32)) + x0y1.AddArg2(x0, y1) + x1y0 := b.NewValue0(v.Pos, OpMul32uhilo, types.NewTuple(typ.UInt32, typ.UInt32)) + x1y0.AddArg2(x1, y0) + x1y1 := b.NewValue0(v.Pos, OpMul32uhilo, types.NewTuple(typ.UInt32, typ.UInt32)) + x1y1.AddArg2(x1, y1) + x0y0Hi := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) + x0y0Hi.AddArg(x0y0) + x0y0Lo := b.NewValue0(v.Pos, OpSelect1, typ.UInt32) + x0y0Lo.AddArg(x0y0) + x0y1Hi := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) + x0y1Hi.AddArg(x0y1) + x0y1Lo := b.NewValue0(v.Pos, OpSelect1, typ.UInt32) + x0y1Lo.AddArg(x0y1) + x1y0Hi := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) + x1y0Hi.AddArg(x1y0) + x1y0Lo := b.NewValue0(v.Pos, OpSelect1, typ.UInt32) + x1y0Lo.AddArg(x1y0) + x1y1Hi := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) + x1y1Hi.AddArg(x1y1) + x1y1Lo := b.NewValue0(v.Pos, OpSelect1, typ.UInt32) + x1y1Lo.AddArg(x1y1) + w1a := b.NewValue0(v.Pos, OpAdd32carry, types.NewTuple(typ.UInt32, types.TypeFlags)) + w1a.AddArg2(x0y0Hi, x0y1Lo) + w2a := b.NewValue0(v.Pos, OpAdd32carrywithcarry, types.NewTuple(typ.UInt32, types.TypeFlags)) + v18 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v18.AddArg(w1a) + w2a.AddArg3(x0y1Hi, x1y0Hi, v18) + w3a := b.NewValue0(v.Pos, OpAdd32withcarry, typ.UInt32) + v20 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v20.AuxInt = int32ToAuxInt(0) + v21 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v21.AddArg(w2a) + w3a.AddArg3(x1y1Hi, v20, v21) + w1b := b.NewValue0(v.Pos, OpAdd32carry, types.NewTuple(typ.UInt32, types.TypeFlags)) + v23 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) + v23.AddArg(w1a) + w1b.AddArg2(x1y0Lo, v23) + w2b := b.NewValue0(v.Pos, OpAdd32carrywithcarry, types.NewTuple(typ.UInt32, types.TypeFlags)) + v25 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) + v25.AddArg(w2a) + v26 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v26.AddArg(w1b) + w2b.AddArg3(x1y1Lo, v25, v26) + w3b := b.NewValue0(v.Pos, OpAdd32withcarry, typ.UInt32) + v28 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v28.AddArg(w2b) + w3b.AddArg3(w3a, v20, v28) + v29 := b.NewValue0(v.Pos, OpMakeTuple, types.NewTuple(typ.UInt64, typ.UInt64)) + v30 := b.NewValue0(v.Pos, OpInt64Make, typ.UInt64) + v31 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) + v31.AddArg(w2b) + v30.AddArg2(w3b, v31) + v32 := b.NewValue0(v.Pos, OpInt64Make, typ.UInt64) + v33 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) + v33.AddArg(w1b) + v32.AddArg2(v33, x0y0Lo) + v29.AddArg2(v30, v32) + v.AddArgs(x0, x1, y0, y1, x0y0, x0y1, x1y0, x1y1, x0y0Hi, x0y0Lo, x0y1Hi, x0y1Lo, x1y0Hi, x1y0Lo, x1y1Hi, x1y1Lo, w1a, w2a, w3a, w1b, w2b, w3b, v29) return true } } @@ -1310,6 +1503,8 @@ func rewriteValuedec64_OpRotateLeft32(v *Value) bool { func rewriteValuedec64_OpRotateLeft64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types // match: (RotateLeft64 x (Int64Make 
hi lo)) // result: (RotateLeft64 x lo) for { @@ -1322,6 +1517,458 @@ func rewriteValuedec64_OpRotateLeft64(v *Value) bool { v.AddArg2(x, lo) return true } + // match: (RotateLeft64 x (Const64 [c])) + // cond: c&63 == 0 + // result: x + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(c&63 == 0) { + break + } + v.copyOf(x) + return true + } + // match: (RotateLeft64 x (Const32 [c])) + // cond: c&63 == 0 + // result: x + for { + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(c&63 == 0) { + break + } + v.copyOf(x) + return true + } + // match: (RotateLeft64 x (Const16 [c])) + // cond: c&63 == 0 + // result: x + for { + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(c&63 == 0) { + break + } + v.copyOf(x) + return true + } + // match: (RotateLeft64 x (Const8 [c])) + // cond: c&63 == 0 + // result: x + for { + x := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + if !(c&63 == 0) { + break + } + v.copyOf(x) + return true + } + // match: (RotateLeft64 x (Const64 [c])) + // cond: c&63 == 32 + // result: (Int64Make (Int64Lo x) (Int64Hi x)) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(c&63 == 32) { + break + } + v.reset(OpInt64Make) + v.Type = t + v0 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } + // match: (RotateLeft64 x (Const32 [c])) + // cond: c&63 == 32 + // result: (Int64Make (Int64Lo x) (Int64Hi x)) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(c&63 == 32) { + break + } + v.reset(OpInt64Make) + v.Type = t + v0 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } + // match: (RotateLeft64 x (Const16 [c])) + // cond: c&63 == 32 + // result: (Int64Make (Int64Lo x) (Int64Hi x)) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(c&63 == 32) { + break + } + v.reset(OpInt64Make) + v.Type = t + v0 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } + // match: (RotateLeft64 x (Const8 [c])) + // cond: c&63 == 32 + // result: (Int64Make (Int64Lo x) (Int64Hi x)) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + if !(c&63 == 32) { + break + } + v.reset(OpInt64Make) + v.Type = t + v0 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } + // match: (RotateLeft64 x (Const64 [c])) + // cond: 0 < c&63 && c&63 < 32 + // result: (Int64Make (Or32 (Lsh32x32 (Int64Hi x) (Const32 [int32(c&31)])) (Rsh32Ux32 (Int64Lo x) (Const32 [int32(32-c&31)]))) (Or32 (Lsh32x32 (Int64Lo x) (Const32 [int32(c&31)])) (Rsh32Ux32 (Int64Hi x) (Const32 [int32(32-c&31)])))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(0 < c&63 && c&63 < 32) { + break + } + v.reset(OpInt64Make) + v.Type = t + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + 
v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(int32(c & 31)) + v1.AddArg2(v2, v3) + v4 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) + v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v5.AddArg(x) + v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v6.AuxInt = int32ToAuxInt(int32(32 - c&31)) + v4.AddArg2(v5, v6) + v0.AddArg2(v1, v4) + v7 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v8 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) + v8.AddArg2(v5, v3) + v9 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) + v9.AddArg2(v2, v6) + v7.AddArg2(v8, v9) + v.AddArg2(v0, v7) + return true + } + // match: (RotateLeft64 x (Const32 [c])) + // cond: 0 < c&63 && c&63 < 32 + // result: (Int64Make (Or32 (Lsh32x32 (Int64Hi x) (Const32 [int32(c&31)])) (Rsh32Ux32 (Int64Lo x) (Const32 [int32(32-c&31)]))) (Or32 (Lsh32x32 (Int64Lo x) (Const32 [int32(c&31)])) (Rsh32Ux32 (Int64Hi x) (Const32 [int32(32-c&31)])))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(0 < c&63 && c&63 < 32) { + break + } + v.reset(OpInt64Make) + v.Type = t + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(int32(c & 31)) + v1.AddArg2(v2, v3) + v4 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) + v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v5.AddArg(x) + v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v6.AuxInt = int32ToAuxInt(int32(32 - c&31)) + v4.AddArg2(v5, v6) + v0.AddArg2(v1, v4) + v7 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v8 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) + v8.AddArg2(v5, v3) + v9 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) + v9.AddArg2(v2, v6) + v7.AddArg2(v8, v9) + v.AddArg2(v0, v7) + return true + } + // match: (RotateLeft64 x (Const16 [c])) + // cond: 0 < c&63 && c&63 < 32 + // result: (Int64Make (Or32 (Lsh32x32 (Int64Hi x) (Const32 [int32(c&31)])) (Rsh32Ux32 (Int64Lo x) (Const32 [int32(32-c&31)]))) (Or32 (Lsh32x32 (Int64Lo x) (Const32 [int32(c&31)])) (Rsh32Ux32 (Int64Hi x) (Const32 [int32(32-c&31)])))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(0 < c&63 && c&63 < 32) { + break + } + v.reset(OpInt64Make) + v.Type = t + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(int32(c & 31)) + v1.AddArg2(v2, v3) + v4 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) + v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v5.AddArg(x) + v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v6.AuxInt = int32ToAuxInt(int32(32 - c&31)) + v4.AddArg2(v5, v6) + v0.AddArg2(v1, v4) + v7 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v8 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) + v8.AddArg2(v5, v3) + v9 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) + v9.AddArg2(v2, v6) + v7.AddArg2(v8, v9) + v.AddArg2(v0, v7) + return true + } + // match: (RotateLeft64 x (Const8 [c])) + // cond: 0 < c&63 && c&63 < 32 + // result: (Int64Make (Or32 (Lsh32x32 (Int64Hi x) (Const32 [int32(c&31)])) (Rsh32Ux32 (Int64Lo x) (Const32 [int32(32-c&31)]))) (Or32 (Lsh32x32 (Int64Lo x) (Const32 [int32(c&31)])) (Rsh32Ux32 (Int64Hi x) (Const32 [int32(32-c&31)])))) + for { + t := v.Type + x := v_0 + if v_1.Op != 
OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + if !(0 < c&63 && c&63 < 32) { + break + } + v.reset(OpInt64Make) + v.Type = t + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(int32(c & 31)) + v1.AddArg2(v2, v3) + v4 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) + v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v5.AddArg(x) + v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v6.AuxInt = int32ToAuxInt(int32(32 - c&31)) + v4.AddArg2(v5, v6) + v0.AddArg2(v1, v4) + v7 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v8 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) + v8.AddArg2(v5, v3) + v9 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) + v9.AddArg2(v2, v6) + v7.AddArg2(v8, v9) + v.AddArg2(v0, v7) + return true + } + // match: (RotateLeft64 x (Const64 [c])) + // cond: 32 < c&63 && c&63 < 64 + // result: (Int64Make (Or32 (Lsh32x32 (Int64Lo x) (Const32 [int32(c&31)])) (Rsh32Ux32 (Int64Hi x) (Const32 [int32(32-c&31)]))) (Or32 (Lsh32x32 (Int64Hi x) (Const32 [int32(c&31)])) (Rsh32Ux32 (Int64Lo x) (Const32 [int32(32-c&31)])))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(32 < c&63 && c&63 < 64) { + break + } + v.reset(OpInt64Make) + v.Type = t + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(int32(c & 31)) + v1.AddArg2(v2, v3) + v4 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) + v5 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v5.AddArg(x) + v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v6.AuxInt = int32ToAuxInt(int32(32 - c&31)) + v4.AddArg2(v5, v6) + v0.AddArg2(v1, v4) + v7 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v8 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) + v8.AddArg2(v5, v3) + v9 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) + v9.AddArg2(v2, v6) + v7.AddArg2(v8, v9) + v.AddArg2(v0, v7) + return true + } + // match: (RotateLeft64 x (Const32 [c])) + // cond: 32 < c&63 && c&63 < 64 + // result: (Int64Make (Or32 (Lsh32x32 (Int64Lo x) (Const32 [int32(c&31)])) (Rsh32Ux32 (Int64Hi x) (Const32 [int32(32-c&31)]))) (Or32 (Lsh32x32 (Int64Hi x) (Const32 [int32(c&31)])) (Rsh32Ux32 (Int64Lo x) (Const32 [int32(32-c&31)])))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(32 < c&63 && c&63 < 64) { + break + } + v.reset(OpInt64Make) + v.Type = t + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(int32(c & 31)) + v1.AddArg2(v2, v3) + v4 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) + v5 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v5.AddArg(x) + v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v6.AuxInt = int32ToAuxInt(int32(32 - c&31)) + v4.AddArg2(v5, v6) + v0.AddArg2(v1, v4) + v7 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v8 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) + v8.AddArg2(v5, v3) + v9 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) + v9.AddArg2(v2, v6) + v7.AddArg2(v8, v9) + v.AddArg2(v0, v7) + return true + } + // match: (RotateLeft64 x (Const16 [c])) + // cond: 32 < c&63 && c&63 < 
64 + // result: (Int64Make (Or32 (Lsh32x32 (Int64Lo x) (Const32 [int32(c&31)])) (Rsh32Ux32 (Int64Hi x) (Const32 [int32(32-c&31)]))) (Or32 (Lsh32x32 (Int64Hi x) (Const32 [int32(c&31)])) (Rsh32Ux32 (Int64Lo x) (Const32 [int32(32-c&31)])))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(32 < c&63 && c&63 < 64) { + break + } + v.reset(OpInt64Make) + v.Type = t + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(int32(c & 31)) + v1.AddArg2(v2, v3) + v4 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) + v5 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v5.AddArg(x) + v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v6.AuxInt = int32ToAuxInt(int32(32 - c&31)) + v4.AddArg2(v5, v6) + v0.AddArg2(v1, v4) + v7 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v8 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) + v8.AddArg2(v5, v3) + v9 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) + v9.AddArg2(v2, v6) + v7.AddArg2(v8, v9) + v.AddArg2(v0, v7) + return true + } + // match: (RotateLeft64 x (Const8 [c])) + // cond: 32 < c&63 && c&63 < 64 + // result: (Int64Make (Or32 (Lsh32x32 (Int64Lo x) (Const32 [int32(c&31)])) (Rsh32Ux32 (Int64Hi x) (Const32 [int32(32-c&31)]))) (Or32 (Lsh32x32 (Int64Hi x) (Const32 [int32(c&31)])) (Rsh32Ux32 (Int64Lo x) (Const32 [int32(32-c&31)])))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + if !(32 < c&63 && c&63 < 64) { + break + } + v.reset(OpInt64Make) + v.Type = t + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(int32(c & 31)) + v1.AddArg2(v2, v3) + v4 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) + v5 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v5.AddArg(x) + v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v6.AuxInt = int32ToAuxInt(int32(32 - c&31)) + v4.AddArg2(v5, v6) + v0.AddArg2(v1, v4) + v7 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v8 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) + v8.AddArg2(v5, v3) + v9 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) + v9.AddArg2(v2, v6) + v7.AddArg2(v8, v9) + v.AddArg2(v0, v7) + return true + } return false } func rewriteValuedec64_OpRotateLeft8(v *Value) bool { @@ -2251,6 +2898,34 @@ func rewriteValuedec64_OpRsh8x64(v *Value) bool { return true } } +func rewriteValuedec64_OpSelect0(v *Value) bool { + v_0 := v.Args[0] + // match: (Select0 (MakeTuple x y)) + // result: x + for { + if v_0.Op != OpMakeTuple { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValuedec64_OpSelect1(v *Value) bool { + v_0 := v.Args[0] + // match: (Select1 (MakeTuple x y)) + // result: y + for { + if v_0.Op != OpMakeTuple { + break + } + y := v_0.Args[1] + v.copyOf(y) + return true + } + return false +} func rewriteValuedec64_OpSignExt16to64(v *Value) bool { v_0 := v.Args[0] b := v.Block @@ -2361,29 +3036,33 @@ func rewriteValuedec64_OpSub64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Sub64 x y) - // result: (Int64Make (Sub32withcarry (Int64Hi x) (Int64Hi y) (Select1 (Sub32carry (Int64Lo x) (Int64Lo y)))) (Select0 (Sub32carry (Int64Lo x) (Int64Lo y)))) + // match: (Sub64 x y) + // 
result: (Last x0: (Int64Lo x) x1: (Int64Hi x) y0: (Int64Lo y) y1: (Int64Hi y) sub: (Sub32carry x0 y0) (Int64Make (Sub32withcarry x1 y1 (Select1 sub)) (Select0 sub))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpInt64Make) - v0 := b.NewValue0(v.Pos, OpSub32withcarry, typ.Int32) - v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) - v2.AddArg(y) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4 := b.NewValue0(v.Pos, OpSub32carry, types.NewTuple(typ.UInt32, types.TypeFlags)) - v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) - v5.AddArg(x) - v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) - v6.AddArg(y) - v4.AddArg2(v5, v6) - v3.AddArg(v4) - v0.AddArg3(v1, v2, v3) - v7 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) - v7.AddArg(v4) - v.AddArg2(v0, v7) + v.reset(OpLast) + v.Type = t + x0 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + x0.AddArg(x) + x1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + x1.AddArg(x) + y0 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + y0.AddArg(y) + y1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + y1.AddArg(y) + sub := b.NewValue0(v.Pos, OpSub32carry, types.NewTuple(typ.UInt32, types.TypeFlags)) + sub.AddArg2(x0, y0) + v5 := b.NewValue0(v.Pos, OpInt64Make, typ.UInt64) + v6 := b.NewValue0(v.Pos, OpSub32withcarry, typ.UInt32) + v7 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v7.AddArg(sub) + v6.AddArg3(x1, y1, v7) + v8 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) + v8.AddArg(sub) + v5.AddArg2(v6, v8) + v.AddArg6(x0, x1, y0, y1, sub, v5) return true } } diff --git a/src/cmd/compile/internal/ssa/rewritedivisible.go b/src/cmd/compile/internal/ssa/rewritedivisible.go new file mode 100644 index 00000000000..b9c077af0f4 --- /dev/null +++ b/src/cmd/compile/internal/ssa/rewritedivisible.go @@ -0,0 +1,1532 @@ +// Code generated from _gen/divisible.rules using 'go generate'; DO NOT EDIT. 
+ +package ssa + +func rewriteValuedivisible(v *Value) bool { + switch v.Op { + case OpEq16: + return rewriteValuedivisible_OpEq16(v) + case OpEq32: + return rewriteValuedivisible_OpEq32(v) + case OpEq64: + return rewriteValuedivisible_OpEq64(v) + case OpEq8: + return rewriteValuedivisible_OpEq8(v) + case OpNeq16: + return rewriteValuedivisible_OpNeq16(v) + case OpNeq32: + return rewriteValuedivisible_OpNeq32(v) + case OpNeq64: + return rewriteValuedivisible_OpNeq64(v) + case OpNeq8: + return rewriteValuedivisible_OpNeq8(v) + } + return false +} +func rewriteValuedivisible_OpEq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq16 x (Mul16 (Div16u x (Const16 [c])) (Const16 [c]))) + // cond: x.Op != OpConst64 && isPowerOfTwo(c) + // result: (Eq16 (And16 x (Const16 [c-1])) (Const16 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul16 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpDiv16u { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_0_1.AuxInt) + if v_1_1.Op != OpConst16 || auxIntToInt16(v_1_1.AuxInt) != c || !(x.Op != OpConst64 && isPowerOfTwo(c)) { + continue + } + v.reset(OpEq16) + v0 := b.NewValue0(v.Pos, OpAnd16, t) + v1 := b.NewValue0(v.Pos, OpConst16, t) + v1.AuxInt = int16ToAuxInt(c - 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst16, t) + v2.AuxInt = int16ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + } + break + } + // match: (Eq16 x (Mul16 (Div16 x (Const16 [c])) (Const16 [c]))) + // cond: x.Op != OpConst64 && isPowerOfTwo(c) + // result: (Eq16 (And16 x (Const16 [c-1])) (Const16 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul16 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpDiv16 { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_0_1.AuxInt) + if v_1_1.Op != OpConst16 || auxIntToInt16(v_1_1.AuxInt) != c || !(x.Op != OpConst64 && isPowerOfTwo(c)) { + continue + } + v.reset(OpEq16) + v0 := b.NewValue0(v.Pos, OpAnd16, t) + v1 := b.NewValue0(v.Pos, OpConst16, t) + v1.AuxInt = int16ToAuxInt(c - 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst16, t) + v2.AuxInt = int16ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + } + break + } + // match: (Eq16 x (Mul16 div:(Div16u x (Const16 [c])) (Const16 [c]))) + // cond: div.Uses == 1 && x.Op != OpConst16 && udivisibleOK16(c) + // result: (Leq16U (RotateLeft16 (Mul16 x (Const16 [int16(udivisible16(c).m)])) (Const16 [int16(16 - udivisible16(c).k)])) (Const16 [int16(udivisible16(c).max)])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul16 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + div := v_1_0 + if div.Op != OpDiv16u { + continue + } + _ = div.Args[1] + if x != div.Args[0] { + continue + } + div_1 := div.Args[1] + if div_1.Op != OpConst16 { + continue + } + c := 
auxIntToInt16(div_1.AuxInt) + if v_1_1.Op != OpConst16 || auxIntToInt16(v_1_1.AuxInt) != c || !(div.Uses == 1 && x.Op != OpConst16 && udivisibleOK16(c)) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpRotateLeft16, t) + v1 := b.NewValue0(v.Pos, OpMul16, t) + v2 := b.NewValue0(v.Pos, OpConst16, t) + v2.AuxInt = int16ToAuxInt(int16(udivisible16(c).m)) + v1.AddArg2(x, v2) + v3 := b.NewValue0(v.Pos, OpConst16, t) + v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k)) + v0.AddArg2(v1, v3) + v4 := b.NewValue0(v.Pos, OpConst16, t) + v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max)) + v.AddArg2(v0, v4) + return true + } + } + break + } + // match: (Eq16 x (Mul16 div:(Div16 x (Const16 [c])) (Const16 [c]))) + // cond: div.Uses == 1 && x.Op != OpConst16 && sdivisibleOK16(c) + // result: (Leq16U (RotateLeft16 (Add16 (Mul16 x (Const16 [int16(sdivisible16(c).m)])) (Const16 [int16(sdivisible16(c).a)])) (Const16 [int16(16 - sdivisible16(c).k)])) (Const16 [int16(sdivisible16(c).max)])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul16 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + div := v_1_0 + if div.Op != OpDiv16 { + continue + } + _ = div.Args[1] + if x != div.Args[0] { + continue + } + div_1 := div.Args[1] + if div_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(div_1.AuxInt) + if v_1_1.Op != OpConst16 || auxIntToInt16(v_1_1.AuxInt) != c || !(div.Uses == 1 && x.Op != OpConst16 && sdivisibleOK16(c)) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpRotateLeft16, t) + v1 := b.NewValue0(v.Pos, OpAdd16, t) + v2 := b.NewValue0(v.Pos, OpMul16, t) + v3 := b.NewValue0(v.Pos, OpConst16, t) + v3.AuxInt = int16ToAuxInt(int16(sdivisible16(c).m)) + v2.AddArg2(x, v3) + v4 := b.NewValue0(v.Pos, OpConst16, t) + v4.AuxInt = int16ToAuxInt(int16(sdivisible16(c).a)) + v1.AddArg2(v2, v4) + v5 := b.NewValue0(v.Pos, OpConst16, t) + v5.AuxInt = int16ToAuxInt(int16(16 - sdivisible16(c).k)) + v0.AddArg2(v1, v5) + v6 := b.NewValue0(v.Pos, OpConst16, t) + v6.AuxInt = int16ToAuxInt(int16(sdivisible16(c).max)) + v.AddArg2(v0, v6) + return true + } + } + break + } + return false +} +func rewriteValuedivisible_OpEq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq32 x (Mul32 (Div32u x (Const32 [c])) (Const32 [c]))) + // cond: x.Op != OpConst64 && isPowerOfTwo(c) + // result: (Eq32 (And32 x (Const32 [c-1])) (Const32 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul32 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpDiv32u { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_0_1.AuxInt) + if v_1_1.Op != OpConst32 || auxIntToInt32(v_1_1.AuxInt) != c || !(x.Op != OpConst64 && isPowerOfTwo(c)) { + continue + } + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpAnd32, t) + v1 := b.NewValue0(v.Pos, OpConst32, t) + v1.AuxInt = int32ToAuxInt(c - 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst32, t) + v2.AuxInt = int32ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + } + break + } + // match: (Eq32 x (Mul32 (Div32 x (Const32 [c])) (Const32 [c]))) + // cond: x.Op != 
OpConst64 && isPowerOfTwo(c) + // result: (Eq32 (And32 x (Const32 [c-1])) (Const32 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul32 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpDiv32 { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_0_1.AuxInt) + if v_1_1.Op != OpConst32 || auxIntToInt32(v_1_1.AuxInt) != c || !(x.Op != OpConst64 && isPowerOfTwo(c)) { + continue + } + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpAnd32, t) + v1 := b.NewValue0(v.Pos, OpConst32, t) + v1.AuxInt = int32ToAuxInt(c - 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst32, t) + v2.AuxInt = int32ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + } + break + } + // match: (Eq32 x (Mul32 div:(Div32u x (Const32 [c])) (Const32 [c]))) + // cond: div.Uses == 1 && x.Op != OpConst32 && udivisibleOK32(c) + // result: (Leq32U (RotateLeft32 (Mul32 x (Const32 [int32(udivisible32(c).m)])) (Const32 [int32(32 - udivisible32(c).k)])) (Const32 [int32(udivisible32(c).max)])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul32 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + div := v_1_0 + if div.Op != OpDiv32u { + continue + } + _ = div.Args[1] + if x != div.Args[0] { + continue + } + div_1 := div.Args[1] + if div_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(div_1.AuxInt) + if v_1_1.Op != OpConst32 || auxIntToInt32(v_1_1.AuxInt) != c || !(div.Uses == 1 && x.Op != OpConst32 && udivisibleOK32(c)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpRotateLeft32, t) + v1 := b.NewValue0(v.Pos, OpMul32, t) + v2 := b.NewValue0(v.Pos, OpConst32, t) + v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m)) + v1.AddArg2(x, v2) + v3 := b.NewValue0(v.Pos, OpConst32, t) + v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k)) + v0.AddArg2(v1, v3) + v4 := b.NewValue0(v.Pos, OpConst32, t) + v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max)) + v.AddArg2(v0, v4) + return true + } + } + break + } + // match: (Eq32 x (Mul32 div:(Div32 x (Const32 [c])) (Const32 [c]))) + // cond: div.Uses == 1 && x.Op != OpConst32 && sdivisibleOK32(c) + // result: (Leq32U (RotateLeft32 (Add32 (Mul32 x (Const32 [int32(sdivisible32(c).m)])) (Const32 [int32(sdivisible32(c).a)])) (Const32 [int32(32 - sdivisible32(c).k)])) (Const32 [int32(sdivisible32(c).max)])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul32 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + div := v_1_0 + if div.Op != OpDiv32 { + continue + } + _ = div.Args[1] + if x != div.Args[0] { + continue + } + div_1 := div.Args[1] + if div_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(div_1.AuxInt) + if v_1_1.Op != OpConst32 || auxIntToInt32(v_1_1.AuxInt) != c || !(div.Uses == 1 && x.Op != OpConst32 && sdivisibleOK32(c)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpRotateLeft32, t) + v1 := b.NewValue0(v.Pos, OpAdd32, t) + v2 := b.NewValue0(v.Pos, OpMul32, t) + v3 := b.NewValue0(v.Pos, OpConst32, t) + 
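+				// v3 carries sdivisible32(c).m, the multiplicative inverse of
+				// the odd part of c mod 2^32; together with the offset a added
+				// below, it maps signed multiples of c into [0, max].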
v3.AuxInt = int32ToAuxInt(int32(sdivisible32(c).m)) + v2.AddArg2(x, v3) + v4 := b.NewValue0(v.Pos, OpConst32, t) + v4.AuxInt = int32ToAuxInt(int32(sdivisible32(c).a)) + v1.AddArg2(v2, v4) + v5 := b.NewValue0(v.Pos, OpConst32, t) + v5.AuxInt = int32ToAuxInt(int32(32 - sdivisible32(c).k)) + v0.AddArg2(v1, v5) + v6 := b.NewValue0(v.Pos, OpConst32, t) + v6.AuxInt = int32ToAuxInt(int32(sdivisible32(c).max)) + v.AddArg2(v0, v6) + return true + } + } + break + } + return false +} +func rewriteValuedivisible_OpEq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq64 x (Mul64 (Div64u x (Const64 [c])) (Const64 [c]))) + // cond: x.Op != OpConst64 && isPowerOfTwo(c) + // result: (Eq64 (And64 x (Const64 [c-1])) (Const64 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul64 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpDiv64u { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_0_1.AuxInt) + if v_1_1.Op != OpConst64 || auxIntToInt64(v_1_1.AuxInt) != c || !(x.Op != OpConst64 && isPowerOfTwo(c)) { + continue + } + v.reset(OpEq64) + v0 := b.NewValue0(v.Pos, OpAnd64, t) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(c - 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + } + break + } + // match: (Eq64 x (Mul64 (Div64 x (Const64 [c])) (Const64 [c]))) + // cond: x.Op != OpConst64 && isPowerOfTwo(c) + // result: (Eq64 (And64 x (Const64 [c-1])) (Const64 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul64 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpDiv64 { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_0_1.AuxInt) + if v_1_1.Op != OpConst64 || auxIntToInt64(v_1_1.AuxInt) != c || !(x.Op != OpConst64 && isPowerOfTwo(c)) { + continue + } + v.reset(OpEq64) + v0 := b.NewValue0(v.Pos, OpAnd64, t) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(c - 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + } + break + } + // match: (Eq64 x (Mul64 div:(Div64u x (Const64 [c])) (Const64 [c]))) + // cond: div.Uses == 1 && x.Op != OpConst64 && udivisibleOK64(c) + // result: (Leq64U (RotateLeft64 (Mul64 x (Const64 [int64(udivisible64(c).m)])) (Const64 [int64(64 - udivisible64(c).k)])) (Const64 [int64(udivisible64(c).max)])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul64 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + div := v_1_0 + if div.Op != OpDiv64u { + continue + } + _ = div.Args[1] + if x != div.Args[0] { + continue + } + div_1 := div.Args[1] + if div_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(div_1.AuxInt) + if v_1_1.Op != OpConst64 || auxIntToInt64(v_1_1.AuxInt) != c || 
!(div.Uses == 1 && x.Op != OpConst64 && udivisibleOK64(c)) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpRotateLeft64, t) + v1 := b.NewValue0(v.Pos, OpMul64, t) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = int64ToAuxInt(int64(udivisible64(c).m)) + v1.AddArg2(x, v2) + v3 := b.NewValue0(v.Pos, OpConst64, t) + v3.AuxInt = int64ToAuxInt(int64(64 - udivisible64(c).k)) + v0.AddArg2(v1, v3) + v4 := b.NewValue0(v.Pos, OpConst64, t) + v4.AuxInt = int64ToAuxInt(int64(udivisible64(c).max)) + v.AddArg2(v0, v4) + return true + } + } + break + } + // match: (Eq64 x (Mul64 div:(Div64 x (Const64 [c])) (Const64 [c]))) + // cond: div.Uses == 1 && x.Op != OpConst64 && sdivisibleOK64(c) + // result: (Leq64U (RotateLeft64 (Add64 (Mul64 x (Const64 [int64(sdivisible64(c).m)])) (Const64 [int64(sdivisible64(c).a)])) (Const64 [int64(64 - sdivisible64(c).k)])) (Const64 [int64(sdivisible64(c).max)])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul64 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + div := v_1_0 + if div.Op != OpDiv64 { + continue + } + _ = div.Args[1] + if x != div.Args[0] { + continue + } + div_1 := div.Args[1] + if div_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(div_1.AuxInt) + if v_1_1.Op != OpConst64 || auxIntToInt64(v_1_1.AuxInt) != c || !(div.Uses == 1 && x.Op != OpConst64 && sdivisibleOK64(c)) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpRotateLeft64, t) + v1 := b.NewValue0(v.Pos, OpAdd64, t) + v2 := b.NewValue0(v.Pos, OpMul64, t) + v3 := b.NewValue0(v.Pos, OpConst64, t) + v3.AuxInt = int64ToAuxInt(int64(sdivisible64(c).m)) + v2.AddArg2(x, v3) + v4 := b.NewValue0(v.Pos, OpConst64, t) + v4.AuxInt = int64ToAuxInt(int64(sdivisible64(c).a)) + v1.AddArg2(v2, v4) + v5 := b.NewValue0(v.Pos, OpConst64, t) + v5.AuxInt = int64ToAuxInt(int64(64 - sdivisible64(c).k)) + v0.AddArg2(v1, v5) + v6 := b.NewValue0(v.Pos, OpConst64, t) + v6.AuxInt = int64ToAuxInt(int64(sdivisible64(c).max)) + v.AddArg2(v0, v6) + return true + } + } + break + } + return false +} +func rewriteValuedivisible_OpEq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq8 x (Mul8 (Div8u x (Const8 [c])) (Const8 [c]))) + // cond: x.Op != OpConst64 && isPowerOfTwo(c) + // result: (Eq8 (And8 x (Const8 [c-1])) (Const8 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul8 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpDiv8u { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_0_1.AuxInt) + if v_1_1.Op != OpConst8 || auxIntToInt8(v_1_1.AuxInt) != c || !(x.Op != OpConst64 && isPowerOfTwo(c)) { + continue + } + v.reset(OpEq8) + v0 := b.NewValue0(v.Pos, OpAnd8, t) + v1 := b.NewValue0(v.Pos, OpConst8, t) + v1.AuxInt = int8ToAuxInt(c - 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst8, t) + v2.AuxInt = int8ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + } + break + } + // match: (Eq8 x (Mul8 (Div8 x (Const8 [c])) (Const8 [c]))) + // cond: x.Op != OpConst64 && isPowerOfTwo(c) + // result: (Eq8 (And8 x (Const8 [c-1])) (Const8 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, 
v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul8 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpDiv8 { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_0_1.AuxInt) + if v_1_1.Op != OpConst8 || auxIntToInt8(v_1_1.AuxInt) != c || !(x.Op != OpConst64 && isPowerOfTwo(c)) { + continue + } + v.reset(OpEq8) + v0 := b.NewValue0(v.Pos, OpAnd8, t) + v1 := b.NewValue0(v.Pos, OpConst8, t) + v1.AuxInt = int8ToAuxInt(c - 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst8, t) + v2.AuxInt = int8ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + } + break + } + // match: (Eq8 x (Mul8 div:(Div8u x (Const8 [c])) (Const8 [c]))) + // cond: div.Uses == 1 && x.Op != OpConst8 && udivisibleOK8(c) + // result: (Leq8U (RotateLeft8 (Mul8 x (Const8 [int8(udivisible8(c).m)])) (Const8 [int8(8 - udivisible8(c).k)])) (Const8 [int8(udivisible8(c).max)])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul8 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + div := v_1_0 + if div.Op != OpDiv8u { + continue + } + _ = div.Args[1] + if x != div.Args[0] { + continue + } + div_1 := div.Args[1] + if div_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(div_1.AuxInt) + if v_1_1.Op != OpConst8 || auxIntToInt8(v_1_1.AuxInt) != c || !(div.Uses == 1 && x.Op != OpConst8 && udivisibleOK8(c)) { + continue + } + v.reset(OpLeq8U) + v0 := b.NewValue0(v.Pos, OpRotateLeft8, t) + v1 := b.NewValue0(v.Pos, OpMul8, t) + v2 := b.NewValue0(v.Pos, OpConst8, t) + v2.AuxInt = int8ToAuxInt(int8(udivisible8(c).m)) + v1.AddArg2(x, v2) + v3 := b.NewValue0(v.Pos, OpConst8, t) + v3.AuxInt = int8ToAuxInt(int8(8 - udivisible8(c).k)) + v0.AddArg2(v1, v3) + v4 := b.NewValue0(v.Pos, OpConst8, t) + v4.AuxInt = int8ToAuxInt(int8(udivisible8(c).max)) + v.AddArg2(v0, v4) + return true + } + } + break + } + // match: (Eq8 x (Mul8 div:(Div8 x (Const8 [c])) (Const8 [c]))) + // cond: div.Uses == 1 && x.Op != OpConst8 && sdivisibleOK8(c) + // result: (Leq8U (RotateLeft8 (Add8 (Mul8 x (Const8 [int8(sdivisible8(c).m)])) (Const8 [int8(sdivisible8(c).a)])) (Const8 [int8(8 - sdivisible8(c).k)])) (Const8 [int8(sdivisible8(c).max)])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul8 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + div := v_1_0 + if div.Op != OpDiv8 { + continue + } + _ = div.Args[1] + if x != div.Args[0] { + continue + } + div_1 := div.Args[1] + if div_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(div_1.AuxInt) + if v_1_1.Op != OpConst8 || auxIntToInt8(v_1_1.AuxInt) != c || !(div.Uses == 1 && x.Op != OpConst8 && sdivisibleOK8(c)) { + continue + } + v.reset(OpLeq8U) + v0 := b.NewValue0(v.Pos, OpRotateLeft8, t) + v1 := b.NewValue0(v.Pos, OpAdd8, t) + v2 := b.NewValue0(v.Pos, OpMul8, t) + v3 := b.NewValue0(v.Pos, OpConst8, t) + v3.AuxInt = int8ToAuxInt(int8(sdivisible8(c).m)) + v2.AddArg2(x, v3) + v4 := b.NewValue0(v.Pos, OpConst8, t) + v4.AuxInt = int8ToAuxInt(int8(sdivisible8(c).a)) + v1.AddArg2(v2, v4) + v5 := b.NewValue0(v.Pos, OpConst8, t) + 
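+				// v5 is the rotate amount 8-k: a RotateLeft8 by 8-k is the
+				// same operation as a rotate right by k, which is how the
+				// divisibility range check is phrased in the rule above.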
v5.AuxInt = int8ToAuxInt(int8(8 - sdivisible8(c).k)) + v0.AddArg2(v1, v5) + v6 := b.NewValue0(v.Pos, OpConst8, t) + v6.AuxInt = int8ToAuxInt(int8(sdivisible8(c).max)) + v.AddArg2(v0, v6) + return true + } + } + break + } + return false +} +func rewriteValuedivisible_OpNeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq16 x (Mul16 (Div16u x (Const16 [c])) (Const16 [c]))) + // cond: x.Op != OpConst64 && isPowerOfTwo(c) + // result: (Neq16 (And16 x (Const16 [c-1])) (Const16 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul16 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpDiv16u { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_0_1.AuxInt) + if v_1_1.Op != OpConst16 || auxIntToInt16(v_1_1.AuxInt) != c || !(x.Op != OpConst64 && isPowerOfTwo(c)) { + continue + } + v.reset(OpNeq16) + v0 := b.NewValue0(v.Pos, OpAnd16, t) + v1 := b.NewValue0(v.Pos, OpConst16, t) + v1.AuxInt = int16ToAuxInt(c - 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst16, t) + v2.AuxInt = int16ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + } + break + } + // match: (Neq16 x (Mul16 (Div16 x (Const16 [c])) (Const16 [c]))) + // cond: x.Op != OpConst64 && isPowerOfTwo(c) + // result: (Neq16 (And16 x (Const16 [c-1])) (Const16 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul16 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpDiv16 { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_0_1.AuxInt) + if v_1_1.Op != OpConst16 || auxIntToInt16(v_1_1.AuxInt) != c || !(x.Op != OpConst64 && isPowerOfTwo(c)) { + continue + } + v.reset(OpNeq16) + v0 := b.NewValue0(v.Pos, OpAnd16, t) + v1 := b.NewValue0(v.Pos, OpConst16, t) + v1.AuxInt = int16ToAuxInt(c - 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst16, t) + v2.AuxInt = int16ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + } + break + } + // match: (Neq16 x (Mul16 div:(Div16u x (Const16 [c])) (Const16 [c]))) + // cond: div.Uses == 1 && x.Op != OpConst16 && udivisibleOK16(c) + // result: (Less16U (Const16 [int16(udivisible16(c).max)]) (RotateLeft16 (Mul16 x (Const16 [int16(udivisible16(c).m)])) (Const16 [int16(16 - udivisible16(c).k)]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul16 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + div := v_1_0 + if div.Op != OpDiv16u { + continue + } + _ = div.Args[1] + if x != div.Args[0] { + continue + } + div_1 := div.Args[1] + if div_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(div_1.AuxInt) + if v_1_1.Op != OpConst16 || auxIntToInt16(v_1_1.AuxInt) != c || !(div.Uses == 1 && x.Op != OpConst16 && udivisibleOK16(c)) { + continue + } + v.reset(OpLess16U) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(int16(udivisible16(c).max)) + v1 := b.NewValue0(v.Pos, 
OpRotateLeft16, t) + v2 := b.NewValue0(v.Pos, OpMul16, t) + v3 := b.NewValue0(v.Pos, OpConst16, t) + v3.AuxInt = int16ToAuxInt(int16(udivisible16(c).m)) + v2.AddArg2(x, v3) + v4 := b.NewValue0(v.Pos, OpConst16, t) + v4.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k)) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } + } + break + } + // match: (Neq16 x (Mul16 div:(Div16 x (Const16 [c])) (Const16 [c]))) + // cond: div.Uses == 1 && x.Op != OpConst16 && sdivisibleOK16(c) + // result: (Less16U (Const16 [int16(sdivisible16(c).max)]) (RotateLeft16 (Add16 (Mul16 x (Const16 [int16(sdivisible16(c).m)])) (Const16 [int16(sdivisible16(c).a)])) (Const16 [int16(16 - sdivisible16(c).k)]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul16 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + div := v_1_0 + if div.Op != OpDiv16 { + continue + } + _ = div.Args[1] + if x != div.Args[0] { + continue + } + div_1 := div.Args[1] + if div_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(div_1.AuxInt) + if v_1_1.Op != OpConst16 || auxIntToInt16(v_1_1.AuxInt) != c || !(div.Uses == 1 && x.Op != OpConst16 && sdivisibleOK16(c)) { + continue + } + v.reset(OpLess16U) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(int16(sdivisible16(c).max)) + v1 := b.NewValue0(v.Pos, OpRotateLeft16, t) + v2 := b.NewValue0(v.Pos, OpAdd16, t) + v3 := b.NewValue0(v.Pos, OpMul16, t) + v4 := b.NewValue0(v.Pos, OpConst16, t) + v4.AuxInt = int16ToAuxInt(int16(sdivisible16(c).m)) + v3.AddArg2(x, v4) + v5 := b.NewValue0(v.Pos, OpConst16, t) + v5.AuxInt = int16ToAuxInt(int16(sdivisible16(c).a)) + v2.AddArg2(v3, v5) + v6 := b.NewValue0(v.Pos, OpConst16, t) + v6.AuxInt = int16ToAuxInt(int16(16 - sdivisible16(c).k)) + v1.AddArg2(v2, v6) + v.AddArg2(v0, v1) + return true + } + } + break + } + return false +} +func rewriteValuedivisible_OpNeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq32 x (Mul32 (Div32u x (Const32 [c])) (Const32 [c]))) + // cond: x.Op != OpConst64 && isPowerOfTwo(c) + // result: (Neq32 (And32 x (Const32 [c-1])) (Const32 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul32 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpDiv32u { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_0_1.AuxInt) + if v_1_1.Op != OpConst32 || auxIntToInt32(v_1_1.AuxInt) != c || !(x.Op != OpConst64 && isPowerOfTwo(c)) { + continue + } + v.reset(OpNeq32) + v0 := b.NewValue0(v.Pos, OpAnd32, t) + v1 := b.NewValue0(v.Pos, OpConst32, t) + v1.AuxInt = int32ToAuxInt(c - 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst32, t) + v2.AuxInt = int32ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + } + break + } + // match: (Neq32 x (Mul32 (Div32 x (Const32 [c])) (Const32 [c]))) + // cond: x.Op != OpConst64 && isPowerOfTwo(c) + // result: (Neq32 (And32 x (Const32 [c-1])) (Const32 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul32 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 
1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpDiv32 { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_0_1.AuxInt) + if v_1_1.Op != OpConst32 || auxIntToInt32(v_1_1.AuxInt) != c || !(x.Op != OpConst64 && isPowerOfTwo(c)) { + continue + } + v.reset(OpNeq32) + v0 := b.NewValue0(v.Pos, OpAnd32, t) + v1 := b.NewValue0(v.Pos, OpConst32, t) + v1.AuxInt = int32ToAuxInt(c - 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst32, t) + v2.AuxInt = int32ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + } + break + } + // match: (Neq32 x (Mul32 div:(Div32u x (Const32 [c])) (Const32 [c]))) + // cond: div.Uses == 1 && x.Op != OpConst32 && udivisibleOK32(c) + // result: (Less32U (Const32 [int32(udivisible32(c).max)]) (RotateLeft32 (Mul32 x (Const32 [int32(udivisible32(c).m)])) (Const32 [int32(32 - udivisible32(c).k)]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul32 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + div := v_1_0 + if div.Op != OpDiv32u { + continue + } + _ = div.Args[1] + if x != div.Args[0] { + continue + } + div_1 := div.Args[1] + if div_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(div_1.AuxInt) + if v_1_1.Op != OpConst32 || auxIntToInt32(v_1_1.AuxInt) != c || !(div.Uses == 1 && x.Op != OpConst32 && udivisibleOK32(c)) { + continue + } + v.reset(OpLess32U) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(int32(udivisible32(c).max)) + v1 := b.NewValue0(v.Pos, OpRotateLeft32, t) + v2 := b.NewValue0(v.Pos, OpMul32, t) + v3 := b.NewValue0(v.Pos, OpConst32, t) + v3.AuxInt = int32ToAuxInt(int32(udivisible32(c).m)) + v2.AddArg2(x, v3) + v4 := b.NewValue0(v.Pos, OpConst32, t) + v4.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k)) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } + } + break + } + // match: (Neq32 x (Mul32 div:(Div32 x (Const32 [c])) (Const32 [c]))) + // cond: div.Uses == 1 && x.Op != OpConst32 && sdivisibleOK32(c) + // result: (Less32U (Const32 [int32(sdivisible32(c).max)]) (RotateLeft32 (Add32 (Mul32 x (Const32 [int32(sdivisible32(c).m)])) (Const32 [int32(sdivisible32(c).a)])) (Const32 [int32(32 - sdivisible32(c).k)]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul32 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + div := v_1_0 + if div.Op != OpDiv32 { + continue + } + _ = div.Args[1] + if x != div.Args[0] { + continue + } + div_1 := div.Args[1] + if div_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(div_1.AuxInt) + if v_1_1.Op != OpConst32 || auxIntToInt32(v_1_1.AuxInt) != c || !(div.Uses == 1 && x.Op != OpConst32 && sdivisibleOK32(c)) { + continue + } + v.reset(OpLess32U) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(int32(sdivisible32(c).max)) + v1 := b.NewValue0(v.Pos, OpRotateLeft32, t) + v2 := b.NewValue0(v.Pos, OpAdd32, t) + v3 := b.NewValue0(v.Pos, OpMul32, t) + v4 := b.NewValue0(v.Pos, OpConst32, t) + v4.AuxInt = int32ToAuxInt(int32(sdivisible32(c).m)) + v3.AddArg2(x, v4) + v5 := b.NewValue0(v.Pos, OpConst32, t) + v5.AuxInt = int32ToAuxInt(int32(sdivisible32(c).a)) + v2.AddArg2(v3, v5) + v6 := 
b.NewValue0(v.Pos, OpConst32, t) + v6.AuxInt = int32ToAuxInt(int32(32 - sdivisible32(c).k)) + v1.AddArg2(v2, v6) + v.AddArg2(v0, v1) + return true + } + } + break + } + return false +} +func rewriteValuedivisible_OpNeq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq64 x (Mul64 (Div64u x (Const64 [c])) (Const64 [c]))) + // cond: x.Op != OpConst64 && isPowerOfTwo(c) + // result: (Neq64 (And64 x (Const64 [c-1])) (Const64 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul64 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpDiv64u { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_0_1.AuxInt) + if v_1_1.Op != OpConst64 || auxIntToInt64(v_1_1.AuxInt) != c || !(x.Op != OpConst64 && isPowerOfTwo(c)) { + continue + } + v.reset(OpNeq64) + v0 := b.NewValue0(v.Pos, OpAnd64, t) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(c - 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + } + break + } + // match: (Neq64 x (Mul64 (Div64 x (Const64 [c])) (Const64 [c]))) + // cond: x.Op != OpConst64 && isPowerOfTwo(c) + // result: (Neq64 (And64 x (Const64 [c-1])) (Const64 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul64 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpDiv64 { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_0_1.AuxInt) + if v_1_1.Op != OpConst64 || auxIntToInt64(v_1_1.AuxInt) != c || !(x.Op != OpConst64 && isPowerOfTwo(c)) { + continue + } + v.reset(OpNeq64) + v0 := b.NewValue0(v.Pos, OpAnd64, t) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(c - 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + } + break + } + // match: (Neq64 x (Mul64 div:(Div64u x (Const64 [c])) (Const64 [c]))) + // cond: div.Uses == 1 && x.Op != OpConst64 && udivisibleOK64(c) + // result: (Less64U (Const64 [int64(udivisible64(c).max)]) (RotateLeft64 (Mul64 x (Const64 [int64(udivisible64(c).m)])) (Const64 [int64(64 - udivisible64(c).k)]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul64 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + div := v_1_0 + if div.Op != OpDiv64u { + continue + } + _ = div.Args[1] + if x != div.Args[0] { + continue + } + div_1 := div.Args[1] + if div_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(div_1.AuxInt) + if v_1_1.Op != OpConst64 || auxIntToInt64(v_1_1.AuxInt) != c || !(div.Uses == 1 && x.Op != OpConst64 && udivisibleOK64(c)) { + continue + } + v.reset(OpLess64U) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(udivisible64(c).max)) + v1 := b.NewValue0(v.Pos, OpRotateLeft64, t) + v2 := b.NewValue0(v.Pos, OpMul64, t) + v3 := 
b.NewValue0(v.Pos, OpConst64, t) + v3.AuxInt = int64ToAuxInt(int64(udivisible64(c).m)) + v2.AddArg2(x, v3) + v4 := b.NewValue0(v.Pos, OpConst64, t) + v4.AuxInt = int64ToAuxInt(int64(64 - udivisible64(c).k)) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } + } + break + } + // match: (Neq64 x (Mul64 div:(Div64 x (Const64 [c])) (Const64 [c]))) + // cond: div.Uses == 1 && x.Op != OpConst64 && sdivisibleOK64(c) + // result: (Less64U (Const64 [int64(sdivisible64(c).max)]) (RotateLeft64 (Add64 (Mul64 x (Const64 [int64(sdivisible64(c).m)])) (Const64 [int64(sdivisible64(c).a)])) (Const64 [int64(64 - sdivisible64(c).k)]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul64 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + div := v_1_0 + if div.Op != OpDiv64 { + continue + } + _ = div.Args[1] + if x != div.Args[0] { + continue + } + div_1 := div.Args[1] + if div_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(div_1.AuxInt) + if v_1_1.Op != OpConst64 || auxIntToInt64(v_1_1.AuxInt) != c || !(div.Uses == 1 && x.Op != OpConst64 && sdivisibleOK64(c)) { + continue + } + v.reset(OpLess64U) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(sdivisible64(c).max)) + v1 := b.NewValue0(v.Pos, OpRotateLeft64, t) + v2 := b.NewValue0(v.Pos, OpAdd64, t) + v3 := b.NewValue0(v.Pos, OpMul64, t) + v4 := b.NewValue0(v.Pos, OpConst64, t) + v4.AuxInt = int64ToAuxInt(int64(sdivisible64(c).m)) + v3.AddArg2(x, v4) + v5 := b.NewValue0(v.Pos, OpConst64, t) + v5.AuxInt = int64ToAuxInt(int64(sdivisible64(c).a)) + v2.AddArg2(v3, v5) + v6 := b.NewValue0(v.Pos, OpConst64, t) + v6.AuxInt = int64ToAuxInt(int64(64 - sdivisible64(c).k)) + v1.AddArg2(v2, v6) + v.AddArg2(v0, v1) + return true + } + } + break + } + return false +} +func rewriteValuedivisible_OpNeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq8 x (Mul8 (Div8u x (Const8 [c])) (Const8 [c]))) + // cond: x.Op != OpConst64 && isPowerOfTwo(c) + // result: (Neq8 (And8 x (Const8 [c-1])) (Const8 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul8 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpDiv8u { + continue + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_0_1.AuxInt) + if v_1_1.Op != OpConst8 || auxIntToInt8(v_1_1.AuxInt) != c || !(x.Op != OpConst64 && isPowerOfTwo(c)) { + continue + } + v.reset(OpNeq8) + v0 := b.NewValue0(v.Pos, OpAnd8, t) + v1 := b.NewValue0(v.Pos, OpConst8, t) + v1.AuxInt = int8ToAuxInt(c - 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst8, t) + v2.AuxInt = int8ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + } + break + } + // match: (Neq8 x (Mul8 (Div8 x (Const8 [c])) (Const8 [c]))) + // cond: x.Op != OpConst64 && isPowerOfTwo(c) + // result: (Neq8 (And8 x (Const8 [c-1])) (Const8 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul8 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpDiv8 { + continue + } + _ = 
v_1_0.Args[1] + if x != v_1_0.Args[0] { + continue + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_0_1.AuxInt) + if v_1_1.Op != OpConst8 || auxIntToInt8(v_1_1.AuxInt) != c || !(x.Op != OpConst64 && isPowerOfTwo(c)) { + continue + } + v.reset(OpNeq8) + v0 := b.NewValue0(v.Pos, OpAnd8, t) + v1 := b.NewValue0(v.Pos, OpConst8, t) + v1.AuxInt = int8ToAuxInt(c - 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst8, t) + v2.AuxInt = int8ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + } + break + } + // match: (Neq8 x (Mul8 div:(Div8u x (Const8 [c])) (Const8 [c]))) + // cond: div.Uses == 1 && x.Op != OpConst8 && udivisibleOK8(c) + // result: (Less8U (Const8 [int8(udivisible8(c).max)]) (RotateLeft8 (Mul8 x (Const8 [int8(udivisible8(c).m)])) (Const8 [int8(8 - udivisible8(c).k)]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul8 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + div := v_1_0 + if div.Op != OpDiv8u { + continue + } + _ = div.Args[1] + if x != div.Args[0] { + continue + } + div_1 := div.Args[1] + if div_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(div_1.AuxInt) + if v_1_1.Op != OpConst8 || auxIntToInt8(v_1_1.AuxInt) != c || !(div.Uses == 1 && x.Op != OpConst8 && udivisibleOK8(c)) { + continue + } + v.reset(OpLess8U) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(int8(udivisible8(c).max)) + v1 := b.NewValue0(v.Pos, OpRotateLeft8, t) + v2 := b.NewValue0(v.Pos, OpMul8, t) + v3 := b.NewValue0(v.Pos, OpConst8, t) + v3.AuxInt = int8ToAuxInt(int8(udivisible8(c).m)) + v2.AddArg2(x, v3) + v4 := b.NewValue0(v.Pos, OpConst8, t) + v4.AuxInt = int8ToAuxInt(int8(8 - udivisible8(c).k)) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } + } + break + } + // match: (Neq8 x (Mul8 div:(Div8 x (Const8 [c])) (Const8 [c]))) + // cond: div.Uses == 1 && x.Op != OpConst8 && sdivisibleOK8(c) + // result: (Less8U (Const8 [int8(sdivisible8(c).max)]) (RotateLeft8 (Add8 (Mul8 x (Const8 [int8(sdivisible8(c).m)])) (Const8 [int8(sdivisible8(c).a)])) (Const8 [int8(8 - sdivisible8(c).k)]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul8 { + continue + } + t := v_1.Type + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + div := v_1_0 + if div.Op != OpDiv8 { + continue + } + _ = div.Args[1] + if x != div.Args[0] { + continue + } + div_1 := div.Args[1] + if div_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(div_1.AuxInt) + if v_1_1.Op != OpConst8 || auxIntToInt8(v_1_1.AuxInt) != c || !(div.Uses == 1 && x.Op != OpConst8 && sdivisibleOK8(c)) { + continue + } + v.reset(OpLess8U) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(int8(sdivisible8(c).max)) + v1 := b.NewValue0(v.Pos, OpRotateLeft8, t) + v2 := b.NewValue0(v.Pos, OpAdd8, t) + v3 := b.NewValue0(v.Pos, OpMul8, t) + v4 := b.NewValue0(v.Pos, OpConst8, t) + v4.AuxInt = int8ToAuxInt(int8(sdivisible8(c).m)) + v3.AddArg2(x, v4) + v5 := b.NewValue0(v.Pos, OpConst8, t) + v5.AuxInt = int8ToAuxInt(int8(sdivisible8(c).a)) + v2.AddArg2(v3, v5) + v6 := b.NewValue0(v.Pos, OpConst8, t) + v6.AuxInt = int8ToAuxInt(int8(8 - sdivisible8(c).k)) + v1.AddArg2(v2, v6) + v.AddArg2(v0, v1) + return true + } + } + break + } + return false +} +func rewriteBlockdivisible(b 
*Block) bool { + return false +} diff --git a/src/cmd/compile/internal/ssa/rewritedivmod.go b/src/cmd/compile/internal/ssa/rewritedivmod.go new file mode 100644 index 00000000000..ab5cf7d676a --- /dev/null +++ b/src/cmd/compile/internal/ssa/rewritedivmod.go @@ -0,0 +1,923 @@ +// Code generated from _gen/divmod.rules using 'go generate'; DO NOT EDIT. + +package ssa + +func rewriteValuedivmod(v *Value) bool { + switch v.Op { + case OpDiv16: + return rewriteValuedivmod_OpDiv16(v) + case OpDiv16u: + return rewriteValuedivmod_OpDiv16u(v) + case OpDiv32: + return rewriteValuedivmod_OpDiv32(v) + case OpDiv32u: + return rewriteValuedivmod_OpDiv32u(v) + case OpDiv64: + return rewriteValuedivmod_OpDiv64(v) + case OpDiv64u: + return rewriteValuedivmod_OpDiv64u(v) + case OpDiv8: + return rewriteValuedivmod_OpDiv8(v) + case OpDiv8u: + return rewriteValuedivmod_OpDiv8u(v) + } + return false +} +func rewriteValuedivmod_OpDiv16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16 n (Const16 [c])) + // cond: isPowerOfTwo(c) + // result: (Rsh16x64 (Add16 n (Rsh16Ux64 (Rsh16x64 n (Const64 [15])) (Const64 [int64(16-log16(c))]))) (Const64 [int64(log16(c))])) + for { + t := v.Type + n := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(isPowerOfTwo(c)) { + break + } + v.reset(OpRsh16x64) + v0 := b.NewValue0(v.Pos, OpAdd16, t) + v1 := b.NewValue0(v.Pos, OpRsh16Ux64, t) + v2 := b.NewValue0(v.Pos, OpRsh16x64, t) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(15) + v2.AddArg2(n, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(int64(16 - log16(c))) + v1.AddArg2(v2, v4) + v0.AddArg2(n, v1) + v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5.AuxInt = int64ToAuxInt(int64(log16(c))) + v.AddArg2(v0, v5) + return true + } + // match: (Div16 x (Const16 [c])) + // cond: smagicOK16(c) + // result: (Sub16 (Rsh32x64 (Mul32 (SignExt16to32 x) (Const32 [int32(smagic16(c).m)])) (Const64 [16 + smagic16(c).s])) (Rsh32x64 (SignExt16to32 x) (Const64 [31]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(smagicOK16(c)) { + break + } + v.reset(OpSub16) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRsh32x64, t) + v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(int32(smagic16(c).m)) + v1.AddArg2(v2, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(16 + smagic16(c).s) + v0.AddArg2(v1, v4) + v5 := b.NewValue0(v.Pos, OpRsh32x64, t) + v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v6.AuxInt = int64ToAuxInt(31) + v5.AddArg2(v2, v6) + v.AddArg2(v0, v5) + return true + } + return false +} +func rewriteValuedivmod_OpDiv16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Div16u x (Const16 [c])) + // cond: t.IsSigned() && smagicOK16(c) + // result: (Rsh32Ux64 (Mul32 (SignExt16to32 x) (Const32 [int32(smagic16(c).m)])) (Const64 [16 + smagic16(c).s])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(t.IsSigned() && smagicOK16(c)) { + break + } + v.reset(OpRsh32Ux64) + v.Type = t + v0 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(x) + 
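+		// Widen to 32 bits so the full product with the magic constant fits
+		// in a register; shifting the product right by 16+s then yields the
+		// quotient directly, as the rule above states.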
v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int32ToAuxInt(int32(smagic16(c).m)) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(16 + smagic16(c).s) + v.AddArg2(v0, v3) + return true + } + // match: (Div16u x (Const16 [c])) + // cond: umagicOK16(c) && config.RegSize == 8 + // result: (Trunc64to16 (Rsh64Ux64 (Mul64 (ZeroExt16to64 x) (Const64 [int64(1<<16 + umagic16(c).m)])) (Const64 [16 + umagic16(c).s]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(umagicOK16(c) && config.RegSize == 8) { + break + } + v.reset(OpTrunc64to16) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(int64(1<<16 + umagic16(c).m)) + v1.AddArg2(v2, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(16 + umagic16(c).s) + v0.AddArg2(v1, v4) + v.AddArg(v0) + return true + } + // match: (Div16u x (Const16 [c])) + // cond: umagicOK16(c) && umagic16(c).m&1 == 0 + // result: (Trunc32to16 (Rsh32Ux64 (Mul32 (ZeroExt16to32 x) (Const32 [int32(1<<15 + umagic16(c).m/2)])) (Const64 [16 + umagic16(c).s - 1]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(umagicOK16(c) && umagic16(c).m&1 == 0) { + break + } + v.reset(OpTrunc32to16) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(int32(1<<15 + umagic16(c).m/2)) + v1.AddArg2(v2, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(16 + umagic16(c).s - 1) + v0.AddArg2(v1, v4) + v.AddArg(v0) + return true + } + // match: (Div16u x (Const16 [c])) + // cond: umagicOK16(c) && config.RegSize == 4 && c&1 == 0 + // result: (Trunc32to16 (Rsh32Ux64 (Mul32 (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1])) (Const32 [int32(1<<15 + (umagic16(c).m+1)/2)])) (Const64 [16 + umagic16(c).s - 2]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(umagicOK16(c) && config.RegSize == 4 && c&1 == 0) { + break + } + v.reset(OpTrunc32to16) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(1) + v2.AddArg2(v3, v4) + v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v5.AuxInt = int32ToAuxInt(int32(1<<15 + (umagic16(c).m+1)/2)) + v1.AddArg2(v2, v5) + v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v6.AuxInt = int64ToAuxInt(16 + umagic16(c).s - 2) + v0.AddArg2(v1, v6) + v.AddArg(v0) + return true + } + // match: (Div16u x (Const16 [c])) + // cond: umagicOK16(c) && config.RegSize == 4 + // result: (Trunc32to16 (Rsh32Ux64 (Avg32u (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) (Mul32 (ZeroExt16to32 x) (Const32 [int32(umagic16(c).m)]))) (Const64 [16 + umagic16(c).s - 1]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(umagicOK16(c) && config.RegSize == 4) { + break + 
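+			// General unsigned case on 32-bit targets: the effective
+			// multiplier is 1<<16 + umagic16(c).m (17 bits wide), so the
+			// rewrite built below uses Avg32u to fold the extra top bit in
+			// without overflowing 32-bit arithmetic.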
} + v.reset(OpTrunc32to16) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpAvg32u, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpLsh32x64, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(16) + v2.AddArg2(v3, v4) + v5 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v6.AuxInt = int32ToAuxInt(int32(umagic16(c).m)) + v5.AddArg2(v3, v6) + v1.AddArg2(v2, v5) + v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v7.AuxInt = int64ToAuxInt(16 + umagic16(c).s - 1) + v0.AddArg2(v1, v7) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuedivmod_OpDiv32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Div32 n (Const32 [c])) + // cond: isPowerOfTwo(c) + // result: (Rsh32x64 (Add32 n (Rsh32Ux64 (Rsh32x64 n (Const64 [31])) (Const64 [int64(32-log32(c))]))) (Const64 [int64(log32(c))])) + for { + t := v.Type + n := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(isPowerOfTwo(c)) { + break + } + v.reset(OpRsh32x64) + v0 := b.NewValue0(v.Pos, OpAdd32, t) + v1 := b.NewValue0(v.Pos, OpRsh32Ux64, t) + v2 := b.NewValue0(v.Pos, OpRsh32x64, t) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(31) + v2.AddArg2(n, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(int64(32 - log32(c))) + v1.AddArg2(v2, v4) + v0.AddArg2(n, v1) + v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5.AuxInt = int64ToAuxInt(int64(log32(c))) + v.AddArg2(v0, v5) + return true + } + // match: (Div32 x (Const32 [c])) + // cond: smagicOK32(c) && config.RegSize == 8 + // result: (Sub32 (Rsh64x64 (Mul64 (SignExt32to64 x) (Const64 [int64(smagic32(c).m)])) (Const64 [32 + smagic32(c).s])) (Rsh64x64 (SignExt32to64 x) (Const64 [63]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(smagicOK32(c) && config.RegSize == 8) { + break + } + v.reset(OpSub32) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRsh64x64, t) + v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(int64(smagic32(c).m)) + v1.AddArg2(v2, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(32 + smagic32(c).s) + v0.AddArg2(v1, v4) + v5 := b.NewValue0(v.Pos, OpRsh64x64, t) + v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v6.AuxInt = int64ToAuxInt(63) + v5.AddArg2(v2, v6) + v.AddArg2(v0, v5) + return true + } + // match: (Div32 x (Const32 [c])) + // cond: smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 + // result: (Sub32 (Rsh32x64 (Hmul32 x (Const32 [int32(smagic32(c).m/2)])) (Const64 [smagic32(c).s - 1])) (Rsh32x64 x (Const64 [31]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0) { + break + } + v.reset(OpSub32) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRsh32x64, t) + v1 := b.NewValue0(v.Pos, OpHmul32, t) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int32ToAuxInt(int32(smagic32(c).m / 2)) + v1.AddArg2(x, v2) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(smagic32(c).s - 1) + 
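+		// m is even, so m/2 still fits in a signed 32-bit word and the shift
+		// drops to s-1 to compensate. The (Rsh32x64 x (Const64 [31])) term
+		// built next is -1 for negative x; subtracting it rounds the
+		// quotient toward zero, matching Go's truncated division.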
v0.AddArg2(v1, v3) + v4 := b.NewValue0(v.Pos, OpRsh32x64, t) + v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5.AuxInt = int64ToAuxInt(31) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) + return true + } + // match: (Div32 x (Const32 [c])) + // cond: smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 + // result: (Sub32 (Rsh32x64 (Add32 x (Hmul32 x (Const32 [int32(smagic32(c).m)]))) (Const64 [smagic32(c).s])) (Rsh32x64 x (Const64 [31]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0) { + break + } + v.reset(OpSub32) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRsh32x64, t) + v1 := b.NewValue0(v.Pos, OpAdd32, t) + v2 := b.NewValue0(v.Pos, OpHmul32, t) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(int32(smagic32(c).m)) + v2.AddArg2(x, v3) + v1.AddArg2(x, v2) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(smagic32(c).s) + v0.AddArg2(v1, v4) + v5 := b.NewValue0(v.Pos, OpRsh32x64, t) + v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v6.AuxInt = int64ToAuxInt(31) + v5.AddArg2(x, v6) + v.AddArg2(v0, v5) + return true + } + return false +} +func rewriteValuedivmod_OpDiv32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Div32u x (Const32 [c])) + // cond: t.IsSigned() && smagicOK32(c) && config.RegSize == 8 + // result: (Rsh64Ux64 (Mul64 (SignExt32to64 x) (Const64 [int64(smagic32(c).m)])) (Const64 [32 + smagic32(c).s])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(t.IsSigned() && smagicOK32(c) && config.RegSize == 8) { + break + } + v.reset(OpRsh64Ux64) + v.Type = t + v0 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64ToAuxInt(int64(smagic32(c).m)) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(32 + smagic32(c).s) + v.AddArg2(v0, v3) + return true + } + // match: (Div32u x (Const32 [c])) + // cond: t.IsSigned() && smagicOK32(c) && config.RegSize == 4 + // result: (Rsh32Ux64 (Hmul32u x (Const32 [int32(smagic32(c).m)])) (Const64 [smagic32(c).s])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(t.IsSigned() && smagicOK32(c) && config.RegSize == 4) { + break + } + v.reset(OpRsh32Ux64) + v.Type = t + v0 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v1.AuxInt = int32ToAuxInt(int32(smagic32(c).m)) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64ToAuxInt(smagic32(c).s) + v.AddArg2(v0, v2) + return true + } + // match: (Div32u x (Const32 [c])) + // cond: umagicOK32(c) && umagic32(c).m&1 == 0 && config.RegSize == 8 + // result: (Trunc64to32 (Rsh64Ux64 (Mul64 (ZeroExt32to64 x) (Const64 [int64(1<<31 + umagic32(c).m/2)])) (Const64 [32 + umagic32(c).s - 1]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(umagicOK32(c) && umagic32(c).m&1 == 0 && config.RegSize == 8) { + break + } + v.reset(OpTrunc64to32) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, 
typ.UInt64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(int64(1<<31 + umagic32(c).m/2)) + v1.AddArg2(v2, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(32 + umagic32(c).s - 1) + v0.AddArg2(v1, v4) + v.AddArg(v0) + return true + } + // match: (Div32u x (Const32 [c])) + // cond: umagicOK32(c) && umagic32(c).m&1 == 0 && config.RegSize == 4 + // result: (Rsh32Ux64 (Hmul32u x (Const32 [int32(1<<31 + umagic32(c).m/2)])) (Const64 [umagic32(c).s - 1])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(umagicOK32(c) && umagic32(c).m&1 == 0 && config.RegSize == 4) { + break + } + v.reset(OpRsh32Ux64) + v.Type = t + v0 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v1.AuxInt = int32ToAuxInt(int32(1<<31 + umagic32(c).m/2)) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64ToAuxInt(umagic32(c).s - 1) + v.AddArg2(v0, v2) + return true + } + // match: (Div32u x (Const32 [c])) + // cond: umagicOK32(c) && config.RegSize == 8 && c&1 == 0 + // result: (Trunc64to32 (Rsh64Ux64 (Mul64 (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1])) (Const64 [int64(1<<31 + (umagic32(c).m+1)/2)])) (Const64 [32 + umagic32(c).s - 2]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(umagicOK32(c) && config.RegSize == 8 && c&1 == 0) { + break + } + v.reset(OpTrunc64to32) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(1) + v2.AddArg2(v3, v4) + v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5.AuxInt = int64ToAuxInt(int64(1<<31 + (umagic32(c).m+1)/2)) + v1.AddArg2(v2, v5) + v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v6.AuxInt = int64ToAuxInt(32 + umagic32(c).s - 2) + v0.AddArg2(v1, v6) + v.AddArg(v0) + return true + } + // match: (Div32u x (Const32 [c])) + // cond: umagicOK32(c) && config.RegSize == 4 && c&1 == 0 + // result: (Rsh32Ux64 (Hmul32u (Rsh32Ux64 x (Const64 [1])) (Const32 [int32(1<<31 + (umagic32(c).m+1)/2)])) (Const64 [umagic32(c).s - 2])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(umagicOK32(c) && config.RegSize == 4 && c&1 == 0) { + break + } + v.reset(OpRsh32Ux64) + v.Type = t + v0 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64ToAuxInt(1) + v1.AddArg2(x, v2) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(int32(1<<31 + (umagic32(c).m+1)/2)) + v0.AddArg2(v1, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(umagic32(c).s - 2) + v.AddArg2(v0, v4) + return true + } + // match: (Div32u x (Const32 [c])) + // cond: umagicOK32(c) && config.RegSize == 8 + // result: (Trunc64to32 (Rsh64Ux64 (Avg64u (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) (Mul64 (ZeroExt32to64 x) (Const64 [int64(umagic32(c).m)]))) (Const64 [32 + umagic32(c).s - 1]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(umagicOK32(c) && config.RegSize == 8) { + break + } + v.reset(OpTrunc64to32) 
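+		// General case: the real multiplier is 1<<32 + umagic32(c).m, one
+		// bit too wide for a 64-bit product with the zero-extended input, so
+		// Avg64u(x<<32, x*m) computes (x<<32 + x*m)>>1 without overflow and
+		// the shift drops to 32+s-1 to compensate.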
+ v.Type = t + v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpAvg64u, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpLsh64x64, typ.UInt64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(32) + v2.AddArg2(v3, v4) + v5 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) + v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt32) + v6.AuxInt = int64ToAuxInt(int64(umagic32(c).m)) + v5.AddArg2(v3, v6) + v1.AddArg2(v2, v5) + v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v7.AuxInt = int64ToAuxInt(32 + umagic32(c).s - 1) + v0.AddArg2(v1, v7) + v.AddArg(v0) + return true + } + // match: (Div32u x (Const32 [c])) + // cond: umagicOK32(c) && config.RegSize == 4 + // result: (Rsh32Ux64 (Avg32u x (Hmul32u x (Const32 [int32(umagic32(c).m)]))) (Const64 [umagic32(c).s - 1])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(umagicOK32(c) && config.RegSize == 4) { + break + } + v.reset(OpRsh32Ux64) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAvg32u, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int32ToAuxInt(int32(umagic32(c).m)) + v1.AddArg2(x, v2) + v0.AddArg2(x, v1) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(umagic32(c).s - 1) + v.AddArg2(v0, v3) + return true + } + return false +} +func rewriteValuedivmod_OpDiv64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div64 n (Const64 [c])) + // cond: isPowerOfTwo(c) + // result: (Rsh64x64 (Add64 n (Rsh64Ux64 (Rsh64x64 n (Const64 [63])) (Const64 [int64(64-log64(c))]))) (Const64 [int64(log64(c))])) + for { + t := v.Type + n := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo(c)) { + break + } + v.reset(OpRsh64x64) + v0 := b.NewValue0(v.Pos, OpAdd64, t) + v1 := b.NewValue0(v.Pos, OpRsh64Ux64, t) + v2 := b.NewValue0(v.Pos, OpRsh64x64, t) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(63) + v2.AddArg2(n, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(int64(64 - log64(c))) + v1.AddArg2(v2, v4) + v0.AddArg2(n, v1) + v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5.AuxInt = int64ToAuxInt(int64(log64(c))) + v.AddArg2(v0, v5) + return true + } + // match: (Div64 x (Const64 [c])) + // cond: smagicOK64(c) && smagic64(c).m&1 == 0 + // result: (Sub64 (Rsh64x64 (Hmul64 x (Const64 [int64(smagic64(c).m/2)])) (Const64 [smagic64(c).s - 1])) (Rsh64x64 x (Const64 [63]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(smagicOK64(c) && smagic64(c).m&1 == 0) { + break + } + v.reset(OpSub64) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRsh64x64, t) + v1 := b.NewValue0(v.Pos, OpHmul64, t) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64ToAuxInt(int64(smagic64(c).m / 2)) + v1.AddArg2(x, v2) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(smagic64(c).s - 1) + v0.AddArg2(v1, v3) + v4 := b.NewValue0(v.Pos, OpRsh64x64, t) + v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) + return true + } + // match: (Div64 x (Const64 [c])) + // cond: smagicOK64(c) && smagic64(c).m&1 != 0 + // result: (Sub64 (Rsh64x64 (Add64 x (Hmul64 x (Const64 
[int64(smagic64(c).m)]))) (Const64 [smagic64(c).s])) (Rsh64x64 x (Const64 [63]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(smagicOK64(c) && smagic64(c).m&1 != 0) { + break + } + v.reset(OpSub64) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRsh64x64, t) + v1 := b.NewValue0(v.Pos, OpAdd64, t) + v2 := b.NewValue0(v.Pos, OpHmul64, t) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(int64(smagic64(c).m)) + v2.AddArg2(x, v3) + v1.AddArg2(x, v2) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(smagic64(c).s) + v0.AddArg2(v1, v4) + v5 := b.NewValue0(v.Pos, OpRsh64x64, t) + v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v6.AuxInt = int64ToAuxInt(63) + v5.AddArg2(x, v6) + v.AddArg2(v0, v5) + return true + } + return false +} +func rewriteValuedivmod_OpDiv64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div64u x (Const64 [c])) + // cond: t.IsSigned() && smagicOK64(c) + // result: (Rsh64Ux64 (Hmul64u x (Const64 [int64(smagic64(c).m)])) (Const64 [smagic64(c).s])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(t.IsSigned() && smagicOK64(c)) { + break + } + v.reset(OpRsh64Ux64) + v.Type = t + v0 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v1.AuxInt = int64ToAuxInt(int64(smagic64(c).m)) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64ToAuxInt(smagic64(c).s) + v.AddArg2(v0, v2) + return true + } + // match: (Div64u x (Const64 [c])) + // cond: umagicOK64(c) && umagic64(c).m&1 == 0 + // result: (Rsh64Ux64 (Hmul64u x (Const64 [int64(1<<63 + umagic64(c).m/2)])) (Const64 [umagic64(c).s - 1])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(umagicOK64(c) && umagic64(c).m&1 == 0) { + break + } + v.reset(OpRsh64Ux64) + v.Type = t + v0 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v1.AuxInt = int64ToAuxInt(int64(1<<63 + umagic64(c).m/2)) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64ToAuxInt(umagic64(c).s - 1) + v.AddArg2(v0, v2) + return true + } + // match: (Div64u x (Const64 [c])) + // cond: umagicOK64(c) && c&1 == 0 + // result: (Rsh64Ux64 (Hmul64u (Rsh64Ux64 x (Const64 [1])) (Const64 [int64(1<<63 + (umagic64(c).m+1)/2)])) (Const64 [umagic64(c).s - 2])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(umagicOK64(c) && c&1 == 0) { + break + } + v.reset(OpRsh64Ux64) + v.Type = t + v0 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64ToAuxInt(1) + v1.AddArg2(x, v2) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(int64(1<<63 + (umagic64(c).m+1)/2)) + v0.AddArg2(v1, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(umagic64(c).s - 2) + v.AddArg2(v0, v4) + return true + } + // match: (Div64u x (Const64 [c])) + // cond: umagicOK64(c) + // result: (Rsh64Ux64 (Avg64u x (Hmul64u x (Const64 [int64(umagic64(c).m)]))) (Const64 [umagic64(c).s - 1])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(umagicOK64(c)) { + break 
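Aside: the final Div64u rule is the fallback for divisors such as 7 whose round-up reciprocal M = 2^64 + m does not fit in 64 bits; Avg64u re-adds the implicit 2^64·x term without losing the carry. A minimal sketch with hand-derived constants for c = 7 (the compiler gets them from umagic64):

```go
package main

import (
	"fmt"
	"math/bits"
)

// avg64u is the generic Avg64u op: floor((x+y)/2) with a 65-bit
// intermediate, kept exact by folding the carry of the add back in.
func avg64u(x, y uint64) uint64 {
	s, carry := bits.Add64(x, y, 0)
	return s>>1 | carry<<63
}

// div7 mirrors (Rsh64Ux64 (Avg64u x (Hmul64u x (Const64 [m]))) (Const64 [s-1])):
// with M = 2^64 + m = ceil(2^67/7), q = floor(x*M/2^67) = floor((x + hi(x*m))/2^3),
// and the average supplies the first of the three shift steps exactly.
func div7(x uint64) uint64 {
	const m = 2635263439101364517 // ceil(2^67/7) - 2^64, hand-computed
	const s = 3                   // shift amount for c = 7
	hi, _ := bits.Mul64(x, m)
	return avg64u(x, hi) >> (s - 1)
}

func main() {
	for _, x := range []uint64{0, 1, 6, 7, 8, 48, 49, 50, 1<<63 - 1, 1 << 63, ^uint64(0)} {
		if div7(x) != x/7 {
			fmt.Printf("mismatch at %d\n", x)
			return
		}
	}
	fmt.Println("ok")
}
```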
+ } + v.reset(OpRsh64Ux64) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAvg64u, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64ToAuxInt(int64(umagic64(c).m)) + v1.AddArg2(x, v2) + v0.AddArg2(x, v1) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(umagic64(c).s - 1) + v.AddArg2(v0, v3) + return true + } + return false +} +func rewriteValuedivmod_OpDiv8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8 n (Const8 [c])) + // cond: isPowerOfTwo(c) + // result: (Rsh8x64 (Add8 n (Rsh8Ux64 (Rsh8x64 n (Const64 [ 7])) (Const64 [int64( 8-log8(c))]))) (Const64 [int64(log8(c))])) + for { + t := v.Type + n := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + if !(isPowerOfTwo(c)) { + break + } + v.reset(OpRsh8x64) + v0 := b.NewValue0(v.Pos, OpAdd8, t) + v1 := b.NewValue0(v.Pos, OpRsh8Ux64, t) + v2 := b.NewValue0(v.Pos, OpRsh8x64, t) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(7) + v2.AddArg2(n, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(int64(8 - log8(c))) + v1.AddArg2(v2, v4) + v0.AddArg2(n, v1) + v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5.AuxInt = int64ToAuxInt(int64(log8(c))) + v.AddArg2(v0, v5) + return true + } + // match: (Div8 x (Const8 [c])) + // cond: smagicOK8(c) + // result: (Sub8 (Rsh32x64 (Mul32 (SignExt8to32 x) (Const32 [int32(smagic8(c).m)])) (Const64 [8 + smagic8(c).s])) (Rsh32x64 (SignExt8to32 x) (Const64 [31]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + if !(smagicOK8(c)) { + break + } + v.reset(OpSub8) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRsh32x64, t) + v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(int32(smagic8(c).m)) + v1.AddArg2(v2, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(8 + smagic8(c).s) + v0.AddArg2(v1, v4) + v5 := b.NewValue0(v.Pos, OpRsh32x64, t) + v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v6.AuxInt = int64ToAuxInt(31) + v5.AddArg2(v2, v6) + v.AddArg2(v0, v5) + return true + } + return false +} +func rewriteValuedivmod_OpDiv8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8u x (Const8 [c])) + // cond: umagicOK8(c) + // result: (Trunc32to8 (Rsh32Ux64 (Mul32 (ZeroExt8to32 x) (Const32 [int32(1<<8 + umagic8(c).m)])) (Const64 [8 + umagic8(c).s]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + if !(umagicOK8(c)) { + break + } + v.reset(OpTrunc32to8) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(int32(1<<8 + umagic8(c).m)) + v1.AddArg2(v2, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(8 + umagic8(c).s) + v0.AddArg2(v1, v4) + v.AddArg(v0) + return true + } + return false +} +func rewriteBlockdivmod(b *Block) bool { + return false +} diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index bda3d5116a6..5b5494f43af 
100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -6607,12 +6607,16 @@ func rewriteValuegeneric_OpCtz8(v *Value) bool { func rewriteValuegeneric_OpCvt32Fto32(v *Value) bool { v_0 := v.Args[0] // match: (Cvt32Fto32 (Const32F [c])) + // cond: c >= -1<<31 && c < 1<<31 // result: (Const32 [int32(c)]) for { if v_0.Op != OpConst32F { break } c := auxIntToFloat32(v_0.AuxInt) + if !(c >= -1<<31 && c < 1<<31) { + break + } v.reset(OpConst32) v.AuxInt = int32ToAuxInt(int32(c)) return true @@ -6622,12 +6626,16 @@ func rewriteValuegeneric_OpCvt32Fto32(v *Value) bool { func rewriteValuegeneric_OpCvt32Fto64(v *Value) bool { v_0 := v.Args[0] // match: (Cvt32Fto64 (Const32F [c])) + // cond: c >= -1<<63 && c < 1<<63 // result: (Const64 [int64(c)]) for { if v_0.Op != OpConst32F { break } c := auxIntToFloat32(v_0.AuxInt) + if !(c >= -1<<63 && c < 1<<63) { + break + } v.reset(OpConst64) v.AuxInt = int64ToAuxInt(int64(c)) return true @@ -6682,12 +6690,16 @@ func rewriteValuegeneric_OpCvt32to64F(v *Value) bool { func rewriteValuegeneric_OpCvt64Fto32(v *Value) bool { v_0 := v.Args[0] // match: (Cvt64Fto32 (Const64F [c])) + // cond: c >= -1<<31 && c < 1<<31 // result: (Const32 [int32(c)]) for { if v_0.Op != OpConst64F { break } c := auxIntToFloat64(v_0.AuxInt) + if !(c >= -1<<31 && c < 1<<31) { + break + } v.reset(OpConst32) v.AuxInt = int32ToAuxInt(int32(c)) return true @@ -6732,12 +6744,16 @@ func rewriteValuegeneric_OpCvt64Fto32F(v *Value) bool { func rewriteValuegeneric_OpCvt64Fto64(v *Value) bool { v_0 := v.Args[0] // match: (Cvt64Fto64 (Const64F [c])) + // cond: c >= -1<<63 && c < 1<<63 // result: (Const64 [int64(c)]) for { if v_0.Op != OpConst64F { break } c := auxIntToFloat64(v_0.AuxInt) + if !(c >= -1<<63 && c < 1<<63) { + break + } v.reset(OpConst64) v.AuxInt = int64ToAuxInt(int64(c)) return true @@ -6846,24 +6862,6 @@ func rewriteValuegeneric_OpDiv16(v *Value) bool { v.AuxInt = int16ToAuxInt(c / d) return true } - // match: (Div16 n (Const16 [c])) - // cond: isNonNegative(n) && isPowerOfTwo(c) - // result: (Rsh16Ux64 n (Const64 [log16(c)])) - for { - n := v_0 - if v_1.Op != OpConst16 { - break - } - c := auxIntToInt16(v_1.AuxInt) - if !(isNonNegative(n) && isPowerOfTwo(c)) { - break - } - v.reset(OpRsh16Ux64) - v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v0.AuxInt = int64ToAuxInt(log16(c)) - v.AddArg2(n, v0) - return true - } // match: (Div16 n (Const16 [c])) // cond: c < 0 && c != -1<<15 // result: (Neg16 (Div16 n (Const16 [-c]))) @@ -6903,74 +6901,12 @@ func rewriteValuegeneric_OpDiv16(v *Value) bool { v.AddArg2(v0, v2) return true } - // match: (Div16 n (Const16 [c])) - // cond: isPowerOfTwo(c) - // result: (Rsh16x64 (Add16 n (Rsh16Ux64 (Rsh16x64 n (Const64 [15])) (Const64 [int64(16-log16(c))]))) (Const64 [int64(log16(c))])) - for { - t := v.Type - n := v_0 - if v_1.Op != OpConst16 { - break - } - c := auxIntToInt16(v_1.AuxInt) - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpRsh16x64) - v0 := b.NewValue0(v.Pos, OpAdd16, t) - v1 := b.NewValue0(v.Pos, OpRsh16Ux64, t) - v2 := b.NewValue0(v.Pos, OpRsh16x64, t) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64ToAuxInt(15) - v2.AddArg2(n, v3) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64ToAuxInt(int64(16 - log16(c))) - v1.AddArg2(v2, v4) - v0.AddArg2(n, v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64ToAuxInt(int64(log16(c))) - v.AddArg2(v0, v5) - return true - } - // match: (Div16 x (Const16 [c])) - // cond: 
smagicOK16(c) - // result: (Sub16 (Rsh32x64 (Mul32 (Const32 [int32(smagic16(c).m)]) (SignExt16to32 x)) (Const64 [16+smagic16(c).s])) (Rsh32x64 (SignExt16to32 x) (Const64 [31]))) - for { - t := v.Type - x := v_0 - if v_1.Op != OpConst16 { - break - } - c := auxIntToInt16(v_1.AuxInt) - if !(smagicOK16(c)) { - break - } - v.reset(OpSub16) - v.Type = t - v0 := b.NewValue0(v.Pos, OpRsh32x64, t) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int32ToAuxInt(int32(smagic16(c).m)) - v3 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v3.AddArg(x) - v1.AddArg2(v2, v3) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64ToAuxInt(16 + smagic16(c).s) - v0.AddArg2(v1, v4) - v5 := b.NewValue0(v.Pos, OpRsh32x64, t) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64ToAuxInt(31) - v5.AddArg2(v3, v6) - v.AddArg2(v0, v5) - return true - } return false } func rewriteValuegeneric_OpDiv16u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - config := b.Func.Config typ := &b.Func.Config.Types // match: (Div16u (Const16 [c]) (Const16 [d])) // cond: d != 0 @@ -7009,127 +6945,12 @@ func rewriteValuegeneric_OpDiv16u(v *Value) bool { v.AddArg2(n, v0) return true } - // match: (Div16u x (Const16 [c])) - // cond: umagicOK16(c) && config.RegSize == 8 - // result: (Trunc64to16 (Rsh64Ux64 (Mul64 (Const64 [int64(1<<16+umagic16(c).m)]) (ZeroExt16to64 x)) (Const64 [16+umagic16(c).s]))) - for { - x := v_0 - if v_1.Op != OpConst16 { - break - } - c := auxIntToInt16(v_1.AuxInt) - if !(umagicOK16(c) && config.RegSize == 8) { - break - } - v.reset(OpTrunc64to16) - v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64ToAuxInt(int64(1<<16 + umagic16(c).m)) - v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v3.AddArg(x) - v1.AddArg2(v2, v3) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64ToAuxInt(16 + umagic16(c).s) - v0.AddArg2(v1, v4) - v.AddArg(v0) - return true - } - // match: (Div16u x (Const16 [c])) - // cond: umagicOK16(c) && config.RegSize == 4 && umagic16(c).m&1 == 0 - // result: (Trunc32to16 (Rsh32Ux64 (Mul32 (Const32 [int32(1<<15+umagic16(c).m/2)]) (ZeroExt16to32 x)) (Const64 [16+umagic16(c).s-1]))) - for { - x := v_0 - if v_1.Op != OpConst16 { - break - } - c := auxIntToInt16(v_1.AuxInt) - if !(umagicOK16(c) && config.RegSize == 4 && umagic16(c).m&1 == 0) { - break - } - v.reset(OpTrunc32to16) - v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int32ToAuxInt(int32(1<<15 + umagic16(c).m/2)) - v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v3.AddArg(x) - v1.AddArg2(v2, v3) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64ToAuxInt(16 + umagic16(c).s - 1) - v0.AddArg2(v1, v4) - v.AddArg(v0) - return true - } - // match: (Div16u x (Const16 [c])) - // cond: umagicOK16(c) && config.RegSize == 4 && c&1 == 0 - // result: (Trunc32to16 (Rsh32Ux64 (Mul32 (Const32 [int32(1<<15+(umagic16(c).m+1)/2)]) (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1]))) (Const64 [16+umagic16(c).s-2]))) - for { - x := v_0 - if v_1.Op != OpConst16 { - break - } - c := auxIntToInt16(v_1.AuxInt) - if !(umagicOK16(c) && config.RegSize == 4 && c&1 == 0) { - break - } - v.reset(OpTrunc32to16) - v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32) - v1 := b.NewValue0(v.Pos, 
OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int32ToAuxInt(int32(1<<15 + (umagic16(c).m+1)/2)) - v3 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32) - v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v4.AddArg(x) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64ToAuxInt(1) - v3.AddArg2(v4, v5) - v1.AddArg2(v2, v3) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64ToAuxInt(16 + umagic16(c).s - 2) - v0.AddArg2(v1, v6) - v.AddArg(v0) - return true - } - // match: (Div16u x (Const16 [c])) - // cond: umagicOK16(c) && config.RegSize == 4 && config.useAvg - // result: (Trunc32to16 (Rsh32Ux64 (Avg32u (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) (Mul32 (Const32 [int32(umagic16(c).m)]) (ZeroExt16to32 x))) (Const64 [16+umagic16(c).s-1]))) - for { - x := v_0 - if v_1.Op != OpConst16 { - break - } - c := auxIntToInt16(v_1.AuxInt) - if !(umagicOK16(c) && config.RegSize == 4 && config.useAvg) { - break - } - v.reset(OpTrunc32to16) - v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAvg32u, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpLsh32x64, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v3.AddArg(x) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64ToAuxInt(16) - v2.AddArg2(v3, v4) - v5 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int32ToAuxInt(int32(umagic16(c).m)) - v5.AddArg2(v6, v3) - v1.AddArg2(v2, v5) - v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v7.AuxInt = int64ToAuxInt(16 + umagic16(c).s - 1) - v0.AddArg2(v1, v7) - v.AddArg(v0) - return true - } return false } func rewriteValuegeneric_OpDiv32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - config := b.Func.Config typ := &b.Func.Config.Types // match: (Div32 (Const32 [c]) (Const32 [d])) // cond: d != 0 @@ -7150,24 +6971,6 @@ func rewriteValuegeneric_OpDiv32(v *Value) bool { v.AuxInt = int32ToAuxInt(c / d) return true } - // match: (Div32 n (Const32 [c])) - // cond: isNonNegative(n) && isPowerOfTwo(c) - // result: (Rsh32Ux64 n (Const64 [log32(c)])) - for { - n := v_0 - if v_1.Op != OpConst32 { - break - } - c := auxIntToInt32(v_1.AuxInt) - if !(isNonNegative(n) && isPowerOfTwo(c)) { - break - } - v.reset(OpRsh32Ux64) - v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v0.AuxInt = int64ToAuxInt(log32(c)) - v.AddArg2(n, v0) - return true - } // match: (Div32 n (Const32 [c])) // cond: c < 0 && c != -1<<31 // result: (Neg32 (Div32 n (Const32 [-c]))) @@ -7207,129 +7010,6 @@ func rewriteValuegeneric_OpDiv32(v *Value) bool { v.AddArg2(v0, v2) return true } - // match: (Div32 n (Const32 [c])) - // cond: isPowerOfTwo(c) - // result: (Rsh32x64 (Add32 n (Rsh32Ux64 (Rsh32x64 n (Const64 [31])) (Const64 [int64(32-log32(c))]))) (Const64 [int64(log32(c))])) - for { - t := v.Type - n := v_0 - if v_1.Op != OpConst32 { - break - } - c := auxIntToInt32(v_1.AuxInt) - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpRsh32x64) - v0 := b.NewValue0(v.Pos, OpAdd32, t) - v1 := b.NewValue0(v.Pos, OpRsh32Ux64, t) - v2 := b.NewValue0(v.Pos, OpRsh32x64, t) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64ToAuxInt(31) - v2.AddArg2(n, v3) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64ToAuxInt(int64(32 - log32(c))) - v1.AddArg2(v2, v4) - v0.AddArg2(n, v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64ToAuxInt(int64(log32(c))) - v.AddArg2(v0, v5) - return true - } - // match: (Div32 
x (Const32 [c])) - // cond: smagicOK32(c) && config.RegSize == 8 - // result: (Sub32 (Rsh64x64 (Mul64 (Const64 [int64(smagic32(c).m)]) (SignExt32to64 x)) (Const64 [32+smagic32(c).s])) (Rsh64x64 (SignExt32to64 x) (Const64 [63]))) - for { - t := v.Type - x := v_0 - if v_1.Op != OpConst32 { - break - } - c := auxIntToInt32(v_1.AuxInt) - if !(smagicOK32(c) && config.RegSize == 8) { - break - } - v.reset(OpSub32) - v.Type = t - v0 := b.NewValue0(v.Pos, OpRsh64x64, t) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64ToAuxInt(int64(smagic32(c).m)) - v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) - v3.AddArg(x) - v1.AddArg2(v2, v3) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64ToAuxInt(32 + smagic32(c).s) - v0.AddArg2(v1, v4) - v5 := b.NewValue0(v.Pos, OpRsh64x64, t) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64ToAuxInt(63) - v5.AddArg2(v3, v6) - v.AddArg2(v0, v5) - return true - } - // match: (Div32 x (Const32 [c])) - // cond: smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 && config.useHmul - // result: (Sub32 (Rsh32x64 (Hmul32 (Const32 [int32(smagic32(c).m/2)]) x) (Const64 [smagic32(c).s-1])) (Rsh32x64 x (Const64 [31]))) - for { - t := v.Type - x := v_0 - if v_1.Op != OpConst32 { - break - } - c := auxIntToInt32(v_1.AuxInt) - if !(smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 && config.useHmul) { - break - } - v.reset(OpSub32) - v.Type = t - v0 := b.NewValue0(v.Pos, OpRsh32x64, t) - v1 := b.NewValue0(v.Pos, OpHmul32, t) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int32ToAuxInt(int32(smagic32(c).m / 2)) - v1.AddArg2(v2, x) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64ToAuxInt(smagic32(c).s - 1) - v0.AddArg2(v1, v3) - v4 := b.NewValue0(v.Pos, OpRsh32x64, t) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64ToAuxInt(31) - v4.AddArg2(x, v5) - v.AddArg2(v0, v4) - return true - } - // match: (Div32 x (Const32 [c])) - // cond: smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 && config.useHmul - // result: (Sub32 (Rsh32x64 (Add32 (Hmul32 (Const32 [int32(smagic32(c).m)]) x) x) (Const64 [smagic32(c).s])) (Rsh32x64 x (Const64 [31]))) - for { - t := v.Type - x := v_0 - if v_1.Op != OpConst32 { - break - } - c := auxIntToInt32(v_1.AuxInt) - if !(smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 && config.useHmul) { - break - } - v.reset(OpSub32) - v.Type = t - v0 := b.NewValue0(v.Pos, OpRsh32x64, t) - v1 := b.NewValue0(v.Pos, OpAdd32, t) - v2 := b.NewValue0(v.Pos, OpHmul32, t) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int32ToAuxInt(int32(smagic32(c).m)) - v2.AddArg2(v3, x) - v1.AddArg2(v2, x) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64ToAuxInt(smagic32(c).s) - v0.AddArg2(v1, v4) - v5 := b.NewValue0(v.Pos, OpRsh32x64, t) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64ToAuxInt(31) - v5.AddArg2(x, v6) - v.AddArg2(v0, v5) - return true - } return false } func rewriteValuegeneric_OpDiv32F(v *Value) bool { @@ -7380,7 +7060,6 @@ func rewriteValuegeneric_OpDiv32u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - config := b.Func.Config typ := &b.Func.Config.Types // match: (Div32u (Const32 [c]) (Const32 [d])) // cond: d != 0 @@ -7419,176 +7098,12 @@ func rewriteValuegeneric_OpDiv32u(v *Value) bool { v.AddArg2(n, v0) return true } - // match: (Div32u x (Const32 [c])) - // cond: umagicOK32(c) && 
config.RegSize == 4 && umagic32(c).m&1 == 0 && config.useHmul - // result: (Rsh32Ux64 (Hmul32u (Const32 [int32(1<<31+umagic32(c).m/2)]) x) (Const64 [umagic32(c).s-1])) - for { - x := v_0 - if v_1.Op != OpConst32 { - break - } - c := auxIntToInt32(v_1.AuxInt) - if !(umagicOK32(c) && config.RegSize == 4 && umagic32(c).m&1 == 0 && config.useHmul) { - break - } - v.reset(OpRsh32Ux64) - v.Type = typ.UInt32 - v0 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v1.AuxInt = int32ToAuxInt(int32(1<<31 + umagic32(c).m/2)) - v0.AddArg2(v1, x) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64ToAuxInt(umagic32(c).s - 1) - v.AddArg2(v0, v2) - return true - } - // match: (Div32u x (Const32 [c])) - // cond: umagicOK32(c) && config.RegSize == 4 && c&1 == 0 && config.useHmul - // result: (Rsh32Ux64 (Hmul32u (Const32 [int32(1<<31+(umagic32(c).m+1)/2)]) (Rsh32Ux64 x (Const64 [1]))) (Const64 [umagic32(c).s-2])) - for { - x := v_0 - if v_1.Op != OpConst32 { - break - } - c := auxIntToInt32(v_1.AuxInt) - if !(umagicOK32(c) && config.RegSize == 4 && c&1 == 0 && config.useHmul) { - break - } - v.reset(OpRsh32Ux64) - v.Type = typ.UInt32 - v0 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v1.AuxInt = int32ToAuxInt(int32(1<<31 + (umagic32(c).m+1)/2)) - v2 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64ToAuxInt(1) - v2.AddArg2(x, v3) - v0.AddArg2(v1, v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64ToAuxInt(umagic32(c).s - 2) - v.AddArg2(v0, v4) - return true - } - // match: (Div32u x (Const32 [c])) - // cond: umagicOK32(c) && config.RegSize == 4 && config.useAvg && config.useHmul - // result: (Rsh32Ux64 (Avg32u x (Hmul32u (Const32 [int32(umagic32(c).m)]) x)) (Const64 [umagic32(c).s-1])) - for { - x := v_0 - if v_1.Op != OpConst32 { - break - } - c := auxIntToInt32(v_1.AuxInt) - if !(umagicOK32(c) && config.RegSize == 4 && config.useAvg && config.useHmul) { - break - } - v.reset(OpRsh32Ux64) - v.Type = typ.UInt32 - v0 := b.NewValue0(v.Pos, OpAvg32u, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int32ToAuxInt(int32(umagic32(c).m)) - v1.AddArg2(v2, x) - v0.AddArg2(x, v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64ToAuxInt(umagic32(c).s - 1) - v.AddArg2(v0, v3) - return true - } - // match: (Div32u x (Const32 [c])) - // cond: umagicOK32(c) && config.RegSize == 8 && umagic32(c).m&1 == 0 - // result: (Trunc64to32 (Rsh64Ux64 (Mul64 (Const64 [int64(1<<31+umagic32(c).m/2)]) (ZeroExt32to64 x)) (Const64 [32+umagic32(c).s-1]))) - for { - x := v_0 - if v_1.Op != OpConst32 { - break - } - c := auxIntToInt32(v_1.AuxInt) - if !(umagicOK32(c) && config.RegSize == 8 && umagic32(c).m&1 == 0) { - break - } - v.reset(OpTrunc64to32) - v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64ToAuxInt(int64(1<<31 + umagic32(c).m/2)) - v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v3.AddArg(x) - v1.AddArg2(v2, v3) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64ToAuxInt(32 + umagic32(c).s - 1) - v0.AddArg2(v1, v4) - v.AddArg(v0) - return true - } - // match: (Div32u x (Const32 [c])) - // cond: umagicOK32(c) && config.RegSize == 8 && c&1 == 0 - // result: (Trunc64to32 (Rsh64Ux64 (Mul64 (Const64 
[int64(1<<31+(umagic32(c).m+1)/2)]) (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1]))) (Const64 [32+umagic32(c).s-2]))) - for { - x := v_0 - if v_1.Op != OpConst32 { - break - } - c := auxIntToInt32(v_1.AuxInt) - if !(umagicOK32(c) && config.RegSize == 8 && c&1 == 0) { - break - } - v.reset(OpTrunc64to32) - v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64ToAuxInt(int64(1<<31 + (umagic32(c).m+1)/2)) - v3 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64) - v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v4.AddArg(x) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64ToAuxInt(1) - v3.AddArg2(v4, v5) - v1.AddArg2(v2, v3) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64ToAuxInt(32 + umagic32(c).s - 2) - v0.AddArg2(v1, v6) - v.AddArg(v0) - return true - } - // match: (Div32u x (Const32 [c])) - // cond: umagicOK32(c) && config.RegSize == 8 && config.useAvg - // result: (Trunc64to32 (Rsh64Ux64 (Avg64u (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) (Mul64 (Const64 [int64(umagic32(c).m)]) (ZeroExt32to64 x))) (Const64 [32+umagic32(c).s-1]))) - for { - x := v_0 - if v_1.Op != OpConst32 { - break - } - c := auxIntToInt32(v_1.AuxInt) - if !(umagicOK32(c) && config.RegSize == 8 && config.useAvg) { - break - } - v.reset(OpTrunc64to32) - v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAvg64u, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpLsh64x64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v3.AddArg(x) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64ToAuxInt(32) - v2.AddArg2(v3, v4) - v5 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt32) - v6.AuxInt = int64ToAuxInt(int64(umagic32(c).m)) - v5.AddArg2(v6, v3) - v1.AddArg2(v2, v5) - v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v7.AuxInt = int64ToAuxInt(32 + umagic32(c).s - 1) - v0.AddArg2(v1, v7) - v.AddArg(v0) - return true - } return false } func rewriteValuegeneric_OpDiv64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - config := b.Func.Config typ := &b.Func.Config.Types // match: (Div64 (Const64 [c]) (Const64 [d])) // cond: d != 0 @@ -7609,36 +7124,6 @@ func rewriteValuegeneric_OpDiv64(v *Value) bool { v.AuxInt = int64ToAuxInt(c / d) return true } - // match: (Div64 n (Const64 [c])) - // cond: isNonNegative(n) && isPowerOfTwo(c) - // result: (Rsh64Ux64 n (Const64 [log64(c)])) - for { - n := v_0 - if v_1.Op != OpConst64 { - break - } - c := auxIntToInt64(v_1.AuxInt) - if !(isNonNegative(n) && isPowerOfTwo(c)) { - break - } - v.reset(OpRsh64Ux64) - v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v0.AuxInt = int64ToAuxInt(log64(c)) - v.AddArg2(n, v0) - return true - } - // match: (Div64 n (Const64 [-1<<63])) - // cond: isNonNegative(n) - // result: (Const64 [0]) - for { - n := v_0 - if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1<<63 || !(isNonNegative(n)) { - break - } - v.reset(OpConst64) - v.AuxInt = int64ToAuxInt(0) - return true - } // match: (Div64 n (Const64 [c])) // cond: c < 0 && c != -1<<63 // result: (Neg64 (Div64 n (Const64 [-c]))) @@ -7660,6 +7145,18 @@ func rewriteValuegeneric_OpDiv64(v *Value) bool { v.AddArg(v0) return true } + // match: (Div64 x (Const64 [-1<<63])) + // cond: isNonNegative(x) + // result: (Const64 [0]) + for { + x := v_0 + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1<<63 || !(isNonNegative(x)) { + break + } + 
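Aside: the power-of-two and -1<<63 rules being shuffled here are easy to mis-read, so two of them as runnable identities (a sketch: the signed power-of-two form now lives in the new divmod pass, and the isNonNegative special case is merely reordered above):

```go
package main

import (
	"fmt"
	"math"
)

// div8 mirrors the isPowerOfTwo rewrite of (Div64 n (Const64 [8])):
// add (n>>63) >>u (64-3) — that is, 7 for negative n and 0 otherwise —
// so the arithmetic shift truncates toward zero the way Go's / does.
func div8(n int64) int64 {
	bias := int64(uint64(n>>63) >> (64 - 3))
	return (n + bias) >> 3
}

// divMinInt mirrors (Div64 x (Const64 [-1<<63])) => (Rsh64Ux64 (And64 x (Neg64 x)) (Const64 [63])):
// x & -x isolates the lowest set bit, and its bit 63 is set exactly when
// x == math.MinInt64, the only dividend with a nonzero quotient (namely 1).
func divMinInt(x int64) int64 {
	return int64(uint64(x&-x) >> 63)
}

func main() {
	for _, n := range []int64{-17, -16, -15, -1, 0, 1, 15, 16, 17, math.MinInt64, math.MaxInt64} {
		if div8(n) != n/8 {
			fmt.Printf("div8 mismatch at %d\n", n)
		}
		if divMinInt(n) != n/math.MinInt64 {
			fmt.Printf("divMinInt mismatch at %d\n", n)
		}
	}
	fmt.Println("done")
}
```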
v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } // match: (Div64 x (Const64 [-1<<63])) // result: (Rsh64Ux64 (And64 x (Neg64 x)) (Const64 [63])) for { @@ -7678,97 +7175,6 @@ func rewriteValuegeneric_OpDiv64(v *Value) bool { v.AddArg2(v0, v2) return true } - // match: (Div64 n (Const64 [c])) - // cond: isPowerOfTwo(c) - // result: (Rsh64x64 (Add64 n (Rsh64Ux64 (Rsh64x64 n (Const64 [63])) (Const64 [int64(64-log64(c))]))) (Const64 [int64(log64(c))])) - for { - t := v.Type - n := v_0 - if v_1.Op != OpConst64 { - break - } - c := auxIntToInt64(v_1.AuxInt) - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpRsh64x64) - v0 := b.NewValue0(v.Pos, OpAdd64, t) - v1 := b.NewValue0(v.Pos, OpRsh64Ux64, t) - v2 := b.NewValue0(v.Pos, OpRsh64x64, t) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64ToAuxInt(63) - v2.AddArg2(n, v3) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64ToAuxInt(int64(64 - log64(c))) - v1.AddArg2(v2, v4) - v0.AddArg2(n, v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64ToAuxInt(int64(log64(c))) - v.AddArg2(v0, v5) - return true - } - // match: (Div64 x (Const64 [c])) - // cond: smagicOK64(c) && smagic64(c).m&1 == 0 && config.useHmul - // result: (Sub64 (Rsh64x64 (Hmul64 (Const64 [int64(smagic64(c).m/2)]) x) (Const64 [smagic64(c).s-1])) (Rsh64x64 x (Const64 [63]))) - for { - t := v.Type - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := auxIntToInt64(v_1.AuxInt) - if !(smagicOK64(c) && smagic64(c).m&1 == 0 && config.useHmul) { - break - } - v.reset(OpSub64) - v.Type = t - v0 := b.NewValue0(v.Pos, OpRsh64x64, t) - v1 := b.NewValue0(v.Pos, OpHmul64, t) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64ToAuxInt(int64(smagic64(c).m / 2)) - v1.AddArg2(v2, x) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64ToAuxInt(smagic64(c).s - 1) - v0.AddArg2(v1, v3) - v4 := b.NewValue0(v.Pos, OpRsh64x64, t) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64ToAuxInt(63) - v4.AddArg2(x, v5) - v.AddArg2(v0, v4) - return true - } - // match: (Div64 x (Const64 [c])) - // cond: smagicOK64(c) && smagic64(c).m&1 != 0 && config.useHmul - // result: (Sub64 (Rsh64x64 (Add64 (Hmul64 (Const64 [int64(smagic64(c).m)]) x) x) (Const64 [smagic64(c).s])) (Rsh64x64 x (Const64 [63]))) - for { - t := v.Type - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := auxIntToInt64(v_1.AuxInt) - if !(smagicOK64(c) && smagic64(c).m&1 != 0 && config.useHmul) { - break - } - v.reset(OpSub64) - v.Type = t - v0 := b.NewValue0(v.Pos, OpRsh64x64, t) - v1 := b.NewValue0(v.Pos, OpAdd64, t) - v2 := b.NewValue0(v.Pos, OpHmul64, t) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64ToAuxInt(int64(smagic64(c).m)) - v2.AddArg2(v3, x) - v1.AddArg2(v2, x) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64ToAuxInt(smagic64(c).s) - v0.AddArg2(v1, v4) - v5 := b.NewValue0(v.Pos, OpRsh64x64, t) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64ToAuxInt(63) - v5.AddArg2(x, v6) - v.AddArg2(v0, v5) - return true - } return false } func rewriteValuegeneric_OpDiv64F(v *Value) bool { @@ -7819,7 +7225,6 @@ func rewriteValuegeneric_OpDiv64u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - config := b.Func.Config typ := &b.Func.Config.Types // match: (Div64u (Const64 [c]) (Const64 [d])) // cond: d != 0 @@ -7858,141 +7263,6 @@ func rewriteValuegeneric_OpDiv64u(v *Value) bool { v.AddArg2(n, v0) return true } - // match: (Div64u x (Const64 [c])) 
- // cond: c > 0 && c <= 0xFFFF && umagicOK32(int32(c)) && config.RegSize == 4 && config.useHmul - // result: (Add64 (Add64 (Add64 (Lsh64x64 (ZeroExt32to64 (Div32u (Trunc64to32 (Rsh64Ux64 x (Const64 [32]))) (Const32 [int32(c)]))) (Const64 [32])) (ZeroExt32to64 (Div32u (Trunc64to32 x) (Const32 [int32(c)])))) (Mul64 (ZeroExt32to64 (Mod32u (Trunc64to32 (Rsh64Ux64 x (Const64 [32]))) (Const32 [int32(c)]))) (Const64 [int64((1<<32)/c)]))) (ZeroExt32to64 (Div32u (Add32 (Mod32u (Trunc64to32 x) (Const32 [int32(c)])) (Mul32 (Mod32u (Trunc64to32 (Rsh64Ux64 x (Const64 [32]))) (Const32 [int32(c)])) (Const32 [int32((1<<32)%c)]))) (Const32 [int32(c)])))) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := auxIntToInt64(v_1.AuxInt) - if !(c > 0 && c <= 0xFFFF && umagicOK32(int32(c)) && config.RegSize == 4 && config.useHmul) { - break - } - v.reset(OpAdd64) - v0 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpLsh64x64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v4 := b.NewValue0(v.Pos, OpDiv32u, typ.UInt32) - v5 := b.NewValue0(v.Pos, OpTrunc64to32, typ.UInt32) - v6 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64) - v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v7.AuxInt = int64ToAuxInt(32) - v6.AddArg2(x, v7) - v5.AddArg(v6) - v8 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v8.AuxInt = int32ToAuxInt(int32(c)) - v4.AddArg2(v5, v8) - v3.AddArg(v4) - v2.AddArg2(v3, v7) - v9 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v10 := b.NewValue0(v.Pos, OpDiv32u, typ.UInt32) - v11 := b.NewValue0(v.Pos, OpTrunc64to32, typ.UInt32) - v11.AddArg(x) - v10.AddArg2(v11, v8) - v9.AddArg(v10) - v1.AddArg2(v2, v9) - v12 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v13 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v14 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32) - v14.AddArg2(v5, v8) - v13.AddArg(v14) - v15 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v15.AuxInt = int64ToAuxInt(int64((1 << 32) / c)) - v12.AddArg2(v13, v15) - v0.AddArg2(v1, v12) - v16 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v17 := b.NewValue0(v.Pos, OpDiv32u, typ.UInt32) - v18 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v19 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32) - v19.AddArg2(v11, v8) - v20 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v21 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v21.AuxInt = int32ToAuxInt(int32((1 << 32) % c)) - v20.AddArg2(v14, v21) - v18.AddArg2(v19, v20) - v17.AddArg2(v18, v8) - v16.AddArg(v17) - v.AddArg2(v0, v16) - return true - } - // match: (Div64u x (Const64 [c])) - // cond: umagicOK64(c) && config.RegSize == 8 && umagic64(c).m&1 == 0 && config.useHmul - // result: (Rsh64Ux64 (Hmul64u (Const64 [int64(1<<63+umagic64(c).m/2)]) x) (Const64 [umagic64(c).s-1])) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := auxIntToInt64(v_1.AuxInt) - if !(umagicOK64(c) && config.RegSize == 8 && umagic64(c).m&1 == 0 && config.useHmul) { - break - } - v.reset(OpRsh64Ux64) - v.Type = typ.UInt64 - v0 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v1.AuxInt = int64ToAuxInt(int64(1<<63 + umagic64(c).m/2)) - v0.AddArg2(v1, x) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64ToAuxInt(umagic64(c).s - 1) - v.AddArg2(v0, v2) - return true - } - // match: (Div64u x (Const64 [c])) - // cond: umagicOK64(c) && config.RegSize == 8 && c&1 == 0 && config.useHmul - // result: (Rsh64Ux64 (Hmul64u (Const64 [int64(1<<63+(umagic64(c).m+1)/2)]) 
(Rsh64Ux64 x (Const64 [1]))) (Const64 [umagic64(c).s-2])) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := auxIntToInt64(v_1.AuxInt) - if !(umagicOK64(c) && config.RegSize == 8 && c&1 == 0 && config.useHmul) { - break - } - v.reset(OpRsh64Ux64) - v.Type = typ.UInt64 - v0 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v1.AuxInt = int64ToAuxInt(int64(1<<63 + (umagic64(c).m+1)/2)) - v2 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64ToAuxInt(1) - v2.AddArg2(x, v3) - v0.AddArg2(v1, v2) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64ToAuxInt(umagic64(c).s - 2) - v.AddArg2(v0, v4) - return true - } - // match: (Div64u x (Const64 [c])) - // cond: umagicOK64(c) && config.RegSize == 8 && config.useAvg && config.useHmul - // result: (Rsh64Ux64 (Avg64u x (Hmul64u (Const64 [int64(umagic64(c).m)]) x)) (Const64 [umagic64(c).s-1])) - for { - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := auxIntToInt64(v_1.AuxInt) - if !(umagicOK64(c) && config.RegSize == 8 && config.useAvg && config.useHmul) { - break - } - v.reset(OpRsh64Ux64) - v.Type = typ.UInt64 - v0 := b.NewValue0(v.Pos, OpAvg64u, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64ToAuxInt(int64(umagic64(c).m)) - v1.AddArg2(v2, x) - v0.AddArg2(x, v1) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64ToAuxInt(umagic64(c).s - 1) - v.AddArg2(v0, v3) - return true - } return false } func rewriteValuegeneric_OpDiv8(v *Value) bool { @@ -8019,24 +7289,6 @@ func rewriteValuegeneric_OpDiv8(v *Value) bool { v.AuxInt = int8ToAuxInt(c / d) return true } - // match: (Div8 n (Const8 [c])) - // cond: isNonNegative(n) && isPowerOfTwo(c) - // result: (Rsh8Ux64 n (Const64 [log8(c)])) - for { - n := v_0 - if v_1.Op != OpConst8 { - break - } - c := auxIntToInt8(v_1.AuxInt) - if !(isNonNegative(n) && isPowerOfTwo(c)) { - break - } - v.reset(OpRsh8Ux64) - v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v0.AuxInt = int64ToAuxInt(log8(c)) - v.AddArg2(n, v0) - return true - } // match: (Div8 n (Const8 [c])) // cond: c < 0 && c != -1<<7 // result: (Neg8 (Div8 n (Const8 [-c]))) @@ -8076,67 +7328,6 @@ func rewriteValuegeneric_OpDiv8(v *Value) bool { v.AddArg2(v0, v2) return true } - // match: (Div8 n (Const8 [c])) - // cond: isPowerOfTwo(c) - // result: (Rsh8x64 (Add8 n (Rsh8Ux64 (Rsh8x64 n (Const64 [ 7])) (Const64 [int64( 8-log8(c))]))) (Const64 [int64(log8(c))])) - for { - t := v.Type - n := v_0 - if v_1.Op != OpConst8 { - break - } - c := auxIntToInt8(v_1.AuxInt) - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpRsh8x64) - v0 := b.NewValue0(v.Pos, OpAdd8, t) - v1 := b.NewValue0(v.Pos, OpRsh8Ux64, t) - v2 := b.NewValue0(v.Pos, OpRsh8x64, t) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64ToAuxInt(7) - v2.AddArg2(n, v3) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64ToAuxInt(int64(8 - log8(c))) - v1.AddArg2(v2, v4) - v0.AddArg2(n, v1) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64ToAuxInt(int64(log8(c))) - v.AddArg2(v0, v5) - return true - } - // match: (Div8 x (Const8 [c])) - // cond: smagicOK8(c) - // result: (Sub8 (Rsh32x64 (Mul32 (Const32 [int32(smagic8(c).m)]) (SignExt8to32 x)) (Const64 [8+smagic8(c).s])) (Rsh32x64 (SignExt8to32 x) (Const64 [31]))) - for { - t := v.Type - x := v_0 - if v_1.Op != OpConst8 { - break - } - c := 
auxIntToInt8(v_1.AuxInt) - if !(smagicOK8(c)) { - break - } - v.reset(OpSub8) - v.Type = t - v0 := b.NewValue0(v.Pos, OpRsh32x64, t) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int32ToAuxInt(int32(smagic8(c).m)) - v3 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v3.AddArg(x) - v1.AddArg2(v2, v3) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64ToAuxInt(8 + smagic8(c).s) - v0.AddArg2(v1, v4) - v5 := b.NewValue0(v.Pos, OpRsh32x64, t) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64ToAuxInt(31) - v5.AddArg2(v3, v6) - v.AddArg2(v0, v5) - return true - } return false } func rewriteValuegeneric_OpDiv8u(v *Value) bool { @@ -8181,32 +7372,6 @@ func rewriteValuegeneric_OpDiv8u(v *Value) bool { v.AddArg2(n, v0) return true } - // match: (Div8u x (Const8 [c])) - // cond: umagicOK8(c) - // result: (Trunc32to8 (Rsh32Ux64 (Mul32 (Const32 [int32(1<<8+umagic8(c).m)]) (ZeroExt8to32 x)) (Const64 [8+umagic8(c).s]))) - for { - x := v_0 - if v_1.Op != OpConst8 { - break - } - c := auxIntToInt8(v_1.AuxInt) - if !(umagicOK8(c)) { - break - } - v.reset(OpTrunc32to8) - v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int32ToAuxInt(int32(1<<8 + umagic8(c).m)) - v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v3.AddArg(x) - v1.AddArg2(v2, v3) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64ToAuxInt(8 + umagic8(c).s) - v0.AddArg2(v1, v4) - v.AddArg(v0) - return true - } return false } func rewriteValuegeneric_OpEq16(v *Value) bool { @@ -8338,445 +7503,6 @@ func rewriteValuegeneric_OpEq16(v *Value) bool { } break } - // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc64to16 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (ZeroExt16to64 x)) (Const64 [s]))) ) ) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic16(c).m) && s == 16+umagic16(c).s && x.Op != OpConst16 && udivisibleOK16(c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int16(udivisible16(c).m)]) x) (Const16 [int16(16-udivisible16(c).k)]) ) (Const16 [int16(udivisible16(c).max)]) ) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpMul16 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst16 { - continue - } - c := auxIntToInt16(v_1_0.AuxInt) - if v_1_1.Op != OpTrunc64to16 { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh64Ux64 { - continue - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul64 { - continue - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - mul_1 := mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { - if mul_0.Op != OpConst64 { - continue - } - m := auxIntToInt64(mul_0.AuxInt) - if mul_1.Op != OpZeroExt16to64 || x != mul_1.Args[0] { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - continue - } - s := auxIntToInt64(v_1_1_0_1.AuxInt) - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic16(c).m) && s == 16+umagic16(c).s && x.Op != OpConst16 && udivisibleOK16(c)) { - continue - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = 
int16ToAuxInt(int16(udivisible16(c).m)) - v1.AddArg2(v2, x) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k)) - v0.AddArg2(v1, v3) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max)) - v.AddArg2(v0, v4) - return true - } - } - } - break - } - // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (ZeroExt16to32 x)) (Const64 [s]))) ) ) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<15+umagic16(c).m/2) && s == 16+umagic16(c).s-1 && x.Op != OpConst16 && udivisibleOK16(c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int16(udivisible16(c).m)]) x) (Const16 [int16(16-udivisible16(c).k)]) ) (Const16 [int16(udivisible16(c).max)]) ) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpMul16 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst16 { - continue - } - c := auxIntToInt16(v_1_0.AuxInt) - if v_1_1.Op != OpTrunc32to16 { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh32Ux64 { - continue - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul32 { - continue - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - mul_1 := mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { - if mul_0.Op != OpConst32 { - continue - } - m := auxIntToInt32(mul_0.AuxInt) - if mul_1.Op != OpZeroExt16to32 || x != mul_1.Args[0] { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - continue - } - s := auxIntToInt64(v_1_1_0_1.AuxInt) - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<15+umagic16(c).m/2) && s == 16+umagic16(c).s-1 && x.Op != OpConst16 && udivisibleOK16(c)) { - continue - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int16ToAuxInt(int16(udivisible16(c).m)) - v1.AddArg2(v2, x) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k)) - v0.AddArg2(v1, v3) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max)) - v.AddArg2(v0, v4) - return true - } - } - } - break - } - // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1]))) (Const64 [s]))) ) ) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<15+(umagic16(c).m+1)/2) && s == 16+umagic16(c).s-2 && x.Op != OpConst16 && udivisibleOK16(c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int16(udivisible16(c).m)]) x) (Const16 [int16(16-udivisible16(c).k)]) ) (Const16 [int16(udivisible16(c).max)]) ) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpMul16 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst16 { - continue - } - c := auxIntToInt16(v_1_0.AuxInt) - if v_1_1.Op != OpTrunc32to16 { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh32Ux64 { - continue - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul32 { - continue - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - mul_1 
:= mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { - if mul_0.Op != OpConst32 { - continue - } - m := auxIntToInt32(mul_0.AuxInt) - if mul_1.Op != OpRsh32Ux64 { - continue - } - _ = mul_1.Args[1] - mul_1_0 := mul_1.Args[0] - if mul_1_0.Op != OpZeroExt16to32 || x != mul_1_0.Args[0] { - continue - } - mul_1_1 := mul_1.Args[1] - if mul_1_1.Op != OpConst64 || auxIntToInt64(mul_1_1.AuxInt) != 1 { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - continue - } - s := auxIntToInt64(v_1_1_0_1.AuxInt) - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<15+(umagic16(c).m+1)/2) && s == 16+umagic16(c).s-2 && x.Op != OpConst16 && udivisibleOK16(c)) { - continue - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v2.AuxInt = int16ToAuxInt(int16(udivisible16(c).m)) - v1.AddArg2(v2, x) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k)) - v0.AddArg2(v1, v3) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max)) - v.AddArg2(v0, v4) - return true - } - } - } - break - } - // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 (Avg32u (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) mul:(Mul32 (Const32 [m]) (ZeroExt16to32 x))) (Const64 [s]))) ) ) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(umagic16(c).m) && s == 16+umagic16(c).s-1 && x.Op != OpConst16 && udivisibleOK16(c) - // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int16(udivisible16(c).m)]) x) (Const16 [int16(16-udivisible16(c).k)]) ) (Const16 [int16(udivisible16(c).max)]) ) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpMul16 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst16 { - continue - } - c := auxIntToInt16(v_1_0.AuxInt) - if v_1_1.Op != OpTrunc32to16 { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh32Ux64 { - continue - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAvg32u { - continue - } - _ = v_1_1_0_0.Args[1] - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpLsh32x64 { - continue - } - _ = v_1_1_0_0_0.Args[1] - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpZeroExt16to32 || x != v_1_1_0_0_0_0.Args[0] { - continue - } - v_1_1_0_0_0_1 := v_1_1_0_0_0.Args[1] - if v_1_1_0_0_0_1.Op != OpConst64 || auxIntToInt64(v_1_1_0_0_0_1.AuxInt) != 16 { - continue - } - mul := v_1_1_0_0.Args[1] - if mul.Op != OpMul32 { - continue - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - mul_1 := mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { - if mul_0.Op != OpConst32 { - continue - } - m := auxIntToInt32(mul_0.AuxInt) - if mul_1.Op != OpZeroExt16to32 || x != mul_1.Args[0] { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - continue - } - s := auxIntToInt64(v_1_1_0_1.AuxInt) - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(umagic16(c).m) && s == 16+umagic16(c).s-1 && x.Op != OpConst16 && udivisibleOK16(c)) { - continue - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) 
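Aside: these Eq16/Eq32 rules fold an expanded division back into a direct divisibility test: multiply by the inverse of the divisor's odd part, rotate right past its power-of-two part, and compare with floor((2^N-1)/c). The udivisible constants are generator-internal; the sketch below hand-derives them for uint32 and c = 12.

```go
package main

import (
	"fmt"
	"math/bits"
)

// divisibleBy12 mirrors the shape of the udivisible rewrite for c = 12 = 3<<2:
// m is the multiplicative inverse of the odd part 3 mod 2^32, k = 2 is the
// power-of-two part, and max = floor((2^32-1)/12). x%12 == 0 exactly when the
// rotated product lands in [0, max]. (Constants hand-derived, not magic.go's.)
func divisibleBy12(x uint32) bool {
	const m = 0xAAAAAAAB // inverse of 3 mod 2^32: 3*m == 1 (mod 2^32)
	const k = 2          // 12 = 3 * 2^2
	const max = (1<<32 - 1) / 12
	return bits.RotateLeft32(x*m, -k) <= max
}

func main() {
	for x := uint32(0); x < 1<<20; x++ {
		if divisibleBy12(x) != (x%12 == 0) {
			fmt.Printf("mismatch at %d\n", x)
			return
		}
	}
	fmt.Println("ok")
}
```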
- v2.AuxInt = int16ToAuxInt(int16(udivisible16(c).m)) - v1.AddArg2(v2, x) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k)) - v0.AddArg2(v1, v3) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max)) - v.AddArg2(v0, v4) - return true - } - } - } - break - } - // match: (Eq16 x (Mul16 (Const16 [c]) (Sub16 (Rsh32x64 mul:(Mul32 (Const32 [m]) (SignExt16to32 x)) (Const64 [s])) (Rsh32x64 (SignExt16to32 x) (Const64 [31]))) ) ) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic16(c).m) && s == 16+smagic16(c).s && x.Op != OpConst16 && sdivisibleOK16(c) - // result: (Leq16U (RotateLeft16 (Add16 (Mul16 (Const16 [int16(sdivisible16(c).m)]) x) (Const16 [int16(sdivisible16(c).a)]) ) (Const16 [int16(16-sdivisible16(c).k)]) ) (Const16 [int16(sdivisible16(c).max)]) ) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpMul16 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst16 { - continue - } - c := auxIntToInt16(v_1_0.AuxInt) - if v_1_1.Op != OpSub16 { - continue - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh32x64 { - continue - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul32 { - continue - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - mul_1 := mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { - if mul_0.Op != OpConst32 { - continue - } - m := auxIntToInt32(mul_0.AuxInt) - if mul_1.Op != OpSignExt16to32 || x != mul_1.Args[0] { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - continue - } - s := auxIntToInt64(v_1_1_0_1.AuxInt) - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpRsh32x64 { - continue - } - _ = v_1_1_1.Args[1] - v_1_1_1_0 := v_1_1_1.Args[0] - if v_1_1_1_0.Op != OpSignExt16to32 || x != v_1_1_1_0.Args[0] { - continue - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic16(c).m) && s == 16+smagic16(c).s && x.Op != OpConst16 && sdivisibleOK16(c)) { - continue - } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) - v1 := b.NewValue0(v.Pos, OpAdd16, typ.UInt16) - v2 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) - v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v3.AuxInt = int16ToAuxInt(int16(sdivisible16(c).m)) - v2.AddArg2(v3, x) - v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v4.AuxInt = int16ToAuxInt(int16(sdivisible16(c).a)) - v1.AddArg2(v2, v4) - v5 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v5.AuxInt = int16ToAuxInt(int16(16 - sdivisible16(c).k)) - v0.AddArg2(v1, v5) - v6 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) - v6.AuxInt = int16ToAuxInt(int16(sdivisible16(c).max)) - v.AddArg2(v0, v6) - return true - } - } - } - break - } - // match: (Eq16 n (Lsh16x64 (Rsh16x64 (Add16 n (Rsh16Ux64 (Rsh16x64 n (Const64 [15])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) - // cond: k > 0 && k < 15 && kbar == 16 - k - // result: (Eq16 (And16 n (Const16 [1<<uint(k)-1])) (Const16 [0])) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - n := v_0 - if v_1.Op != OpLsh16x64 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh16x64 { - continue - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op !=
OpAdd16 { - continue - } - t := v_1_0_0.Type - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - v_1_0_0_1 := v_1_0_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 { - if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh16Ux64 || v_1_0_0_1.Type != t { - continue - } - _ = v_1_0_0_1.Args[1] - v_1_0_0_1_0 := v_1_0_0_1.Args[0] - if v_1_0_0_1_0.Op != OpRsh16x64 || v_1_0_0_1_0.Type != t { - continue - } - _ = v_1_0_0_1_0.Args[1] - if n != v_1_0_0_1_0.Args[0] { - continue - } - v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1] - if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 15 { - continue - } - v_1_0_0_1_1 := v_1_0_0_1.Args[1] - if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 { - continue - } - kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt) - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { - continue - } - k := auxIntToInt64(v_1_0_1.AuxInt) - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 15 && kbar == 16-k) { - continue - } - v.reset(OpEq16) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v1 := b.NewValue0(v.Pos, OpConst16, t) - v1.AuxInt = int16ToAuxInt(1<<uint(k) - 1) - v0.AddArg2(n, v1) - v2 := b.NewValue0(v.Pos, OpConst16, t) - v2.AuxInt = int16ToAuxInt(0) - v.AddArg2(v0, v2) - return true - } - } - break - } - // match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 mul:(Hmul32u (Const32 [m]) x) (Const64 [s])) ) ) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<31+umagic32(c).m/2) && s == umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int32(udivisible32(c).m)]) x) (Const32 [int32(32-udivisible32(c).k)]) ) (Const32 [int32(udivisible32(c).max)]) ) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpMul32 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst32 { - continue - } - c := auxIntToInt32(v_1_0.AuxInt) - if v_1_1.Op != OpRsh32Ux64 { - continue - } - _ = v_1_1.Args[1] - mul := v_1_1.Args[0] - if mul.Op != OpHmul32u { - continue - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - mul_1 := mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { - if mul_0.Op != OpConst32 { - continue - } - m := auxIntToInt32(mul_0.AuxInt) - if x != mul_1 { - continue - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpConst64 { - continue - } - s := auxIntToInt64(v_1_1_1.AuxInt) - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<31+umagic32(c).m/2) && s == umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)) { - continue - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m)) - v1.AddArg2(v2, x) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k)) - v0.AddArg2(v1, v3) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max)) - v.AddArg2(v0, v4) - return true - } - } - } - break - } - // match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 mul:(Hmul32u (Const32 [m]) (Rsh32Ux64 x (Const64 [1]))) (Const64 [s])) ) ) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<31+(umagic32(c).m+1)/2) && s == umagic32(c).s-2 && x.Op != OpConst32 && udivisibleOK32(c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int32(udivisible32(c).m)]) x) (Const32 [int32(32-udivisible32(c).k)]) ) (Const32 [int32(udivisible32(c).max)]) ) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpMul32 { - continue - } - _ = v_1.Args[1] - v_1_0 :=
v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst32 { - continue - } - c := auxIntToInt32(v_1_0.AuxInt) - if v_1_1.Op != OpRsh32Ux64 { - continue - } - _ = v_1_1.Args[1] - mul := v_1_1.Args[0] - if mul.Op != OpHmul32u { - continue - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - mul_1 := mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { - if mul_0.Op != OpConst32 || mul_0.Type != typ.UInt32 { - continue - } - m := auxIntToInt32(mul_0.AuxInt) - if mul_1.Op != OpRsh32Ux64 { - continue - } - _ = mul_1.Args[1] - if x != mul_1.Args[0] { - continue - } - mul_1_1 := mul_1.Args[1] - if mul_1_1.Op != OpConst64 || auxIntToInt64(mul_1_1.AuxInt) != 1 { - continue - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpConst64 { - continue - } - s := auxIntToInt64(v_1_1_1.AuxInt) - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<31+(umagic32(c).m+1)/2) && s == umagic32(c).s-2 && x.Op != OpConst32 && udivisibleOK32(c)) { - continue - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m)) - v1.AddArg2(v2, x) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k)) - v0.AddArg2(v1, v3) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max)) - v.AddArg2(v0, v4) - return true - } - } - } - break - } - // match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 (Avg32u x mul:(Hmul32u (Const32 [m]) x)) (Const64 [s])) ) ) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(umagic32(c).m) && s == umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int32(udivisible32(c).m)]) x) (Const32 [int32(32-udivisible32(c).k)]) ) (Const32 [int32(udivisible32(c).max)]) ) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpMul32 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst32 { - continue - } - c := auxIntToInt32(v_1_0.AuxInt) - if v_1_1.Op != OpRsh32Ux64 { - continue - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAvg32u { - continue - } - _ = v_1_1_0.Args[1] - if x != v_1_1_0.Args[0] { - continue - } - mul := v_1_1_0.Args[1] - if mul.Op != OpHmul32u { - continue - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - mul_1 := mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { - if mul_0.Op != OpConst32 { - continue - } - m := auxIntToInt32(mul_0.AuxInt) - if x != mul_1 { - continue - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpConst64 { - continue - } - s := auxIntToInt64(v_1_1_1.AuxInt) - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(umagic32(c).m) && s == umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)) { - continue - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m)) - v1.AddArg2(v2, x) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k)) - v0.AddArg2(v1, v3) - v4 := 
b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max)) - v.AddArg2(v0, v4) - return true - } - } - } - break - } - // match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (ZeroExt32to64 x)) (Const64 [s]))) ) ) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic32(c).m/2) && s == 32+umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int32(udivisible32(c).m)]) x) (Const32 [int32(32-udivisible32(c).k)]) ) (Const32 [int32(udivisible32(c).max)]) ) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpMul32 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst32 { - continue - } - c := auxIntToInt32(v_1_0.AuxInt) - if v_1_1.Op != OpTrunc64to32 { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh64Ux64 { - continue - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul64 { - continue - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - mul_1 := mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { - if mul_0.Op != OpConst64 { - continue - } - m := auxIntToInt64(mul_0.AuxInt) - if mul_1.Op != OpZeroExt32to64 || x != mul_1.Args[0] { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - continue - } - s := auxIntToInt64(v_1_1_0_1.AuxInt) - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic32(c).m/2) && s == 32+umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)) { - continue - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m)) - v1.AddArg2(v2, x) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k)) - v0.AddArg2(v1, v3) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max)) - v.AddArg2(v0, v4) - return true - } - } - } - break - } - // match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1]))) (Const64 [s]))) ) ) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic32(c).m+1)/2) && s == 32+umagic32(c).s-2 && x.Op != OpConst32 && udivisibleOK32(c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int32(udivisible32(c).m)]) x) (Const32 [int32(32-udivisible32(c).k)]) ) (Const32 [int32(udivisible32(c).max)]) ) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpMul32 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst32 { - continue - } - c := auxIntToInt32(v_1_0.AuxInt) - if v_1_1.Op != OpTrunc64to32 { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh64Ux64 { - continue - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul64 { - continue - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - mul_1 := mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { - if mul_0.Op != OpConst64 { - continue - } - m := auxIntToInt64(mul_0.AuxInt) - if mul_1.Op != OpRsh64Ux64 { - 
continue - } - _ = mul_1.Args[1] - mul_1_0 := mul_1.Args[0] - if mul_1_0.Op != OpZeroExt32to64 || x != mul_1_0.Args[0] { - continue - } - mul_1_1 := mul_1.Args[1] - if mul_1_1.Op != OpConst64 || auxIntToInt64(mul_1_1.AuxInt) != 1 { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - continue - } - s := auxIntToInt64(v_1_1_0_1.AuxInt) - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic32(c).m+1)/2) && s == 32+umagic32(c).s-2 && x.Op != OpConst32 && udivisibleOK32(c)) { - continue - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m)) - v1.AddArg2(v2, x) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k)) - v0.AddArg2(v1, v3) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max)) - v.AddArg2(v0, v4) - return true - } - } - } - break - } - // match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 (Avg64u (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) mul:(Mul64 (Const64 [m]) (ZeroExt32to64 x))) (Const64 [s]))) ) ) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic32(c).m) && s == 32+umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c) - // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int32(udivisible32(c).m)]) x) (Const32 [int32(32-udivisible32(c).k)]) ) (Const32 [int32(udivisible32(c).max)]) ) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpMul32 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst32 { - continue - } - c := auxIntToInt32(v_1_0.AuxInt) - if v_1_1.Op != OpTrunc64to32 { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh64Ux64 { - continue - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAvg64u { - continue - } - _ = v_1_1_0_0.Args[1] - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpLsh64x64 { - continue - } - _ = v_1_1_0_0_0.Args[1] - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpZeroExt32to64 || x != v_1_1_0_0_0_0.Args[0] { - continue - } - v_1_1_0_0_0_1 := v_1_1_0_0_0.Args[1] - if v_1_1_0_0_0_1.Op != OpConst64 || auxIntToInt64(v_1_1_0_0_0_1.AuxInt) != 32 { - continue - } - mul := v_1_1_0_0.Args[1] - if mul.Op != OpMul64 { - continue - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - mul_1 := mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { - if mul_0.Op != OpConst64 { - continue - } - m := auxIntToInt64(mul_0.AuxInt) - if mul_1.Op != OpZeroExt32to64 || x != mul_1.Args[0] { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - continue - } - s := auxIntToInt64(v_1_1_0_1.AuxInt) - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic32(c).m) && s == 32+umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)) { - continue - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m)) - v1.AddArg2(v2, x) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k)) - 
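An aside on what these Eq32 divisibility rules compute: they replace a test x%c == 0 with the multiply-and-rotate trick from Hacker's Delight (section 10-17). A minimal runnable sketch of the same test follows; the helper names modInverse32 and divisibleBy are illustrative, not the compiler's.

package main

import (
	"fmt"
	"math/bits"
)

// modInverse32 returns the multiplicative inverse of the odd number d
// modulo 2^32, via Newton iteration (each step doubles the correct bits).
func modInverse32(d uint32) uint32 {
	x := d // d*d == 1 (mod 8), so x is already correct to 3 bits
	for i := 0; i < 5; i++ {
		x *= 2 - d*x
	}
	return x
}

// divisibleBy mirrors the test the rules above emit: writing c = c0<<k
// with c0 odd, m = c0^-1 mod 2^32 and max = (2^32-1)/c, we have
// x%c == 0 exactly when rotr32(x*m, k) <= max. The generated code
// expresses the right rotate as RotateLeft32 by 32-k.
func divisibleBy(x, c uint32) bool {
	k := bits.TrailingZeros32(c)
	m := modInverse32(c >> k)
	max := ^uint32(0) / c
	return bits.RotateLeft32(x*m, -k) <= max
}

func main() {
	for x := uint32(0); x < 1000; x++ {
		if divisibleBy(x, 6) != (x%6 == 0) {
			fmt.Println("mismatch at", x)
		}
	}
	fmt.Println("ok")
}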
v0.AddArg2(v1, v3) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max)) - v.AddArg2(v0, v4) - return true - } - } - } - break - } - // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh64x64 mul:(Mul64 (Const64 [m]) (SignExt32to64 x)) (Const64 [s])) (Rsh64x64 (SignExt32to64 x) (Const64 [63]))) ) ) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic32(c).m) && s == 32+smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int32(sdivisible32(c).m)]) x) (Const32 [int32(sdivisible32(c).a)]) ) (Const32 [int32(32-sdivisible32(c).k)]) ) (Const32 [int32(sdivisible32(c).max)]) ) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpMul32 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst32 { - continue - } - c := auxIntToInt32(v_1_0.AuxInt) - if v_1_1.Op != OpSub32 { - continue - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh64x64 { - continue - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul64 { - continue - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - mul_1 := mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { - if mul_0.Op != OpConst64 { - continue - } - m := auxIntToInt64(mul_0.AuxInt) - if mul_1.Op != OpSignExt32to64 || x != mul_1.Args[0] { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - continue - } - s := auxIntToInt64(v_1_1_0_1.AuxInt) - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpRsh64x64 { - continue - } - _ = v_1_1_1.Args[1] - v_1_1_1_0 := v_1_1_1.Args[0] - if v_1_1_1_0.Op != OpSignExt32to64 || x != v_1_1_1_0.Args[0] { - continue - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic32(c).m) && s == 32+smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c)) { - continue - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int32ToAuxInt(int32(sdivisible32(c).m)) - v2.AddArg2(v3, x) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int32ToAuxInt(int32(sdivisible32(c).a)) - v1.AddArg2(v2, v4) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int32ToAuxInt(int32(32 - sdivisible32(c).k)) - v0.AddArg2(v1, v5) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int32ToAuxInt(int32(sdivisible32(c).max)) - v.AddArg2(v0, v6) - return true - } - } - } - break - } - // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 mul:(Hmul32 (Const32 [m]) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) ) ) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m/2) && s == smagic32(c).s-1 && x.Op != OpConst32 && sdivisibleOK32(c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int32(sdivisible32(c).m)]) x) (Const32 [int32(sdivisible32(c).a)]) ) (Const32 [int32(32-sdivisible32(c).k)]) ) (Const32 [int32(sdivisible32(c).max)]) ) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpMul32 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for 
_i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst32 { - continue - } - c := auxIntToInt32(v_1_0.AuxInt) - if v_1_1.Op != OpSub32 { - continue - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh32x64 { - continue - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpHmul32 { - continue - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - mul_1 := mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { - if mul_0.Op != OpConst32 { - continue - } - m := auxIntToInt32(mul_0.AuxInt) - if x != mul_1 { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - continue - } - s := auxIntToInt64(v_1_1_0_1.AuxInt) - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpRsh32x64 { - continue - } - _ = v_1_1_1.Args[1] - if x != v_1_1_1.Args[0] { - continue - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m/2) && s == smagic32(c).s-1 && x.Op != OpConst32 && sdivisibleOK32(c)) { - continue - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int32ToAuxInt(int32(sdivisible32(c).m)) - v2.AddArg2(v3, x) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int32ToAuxInt(int32(sdivisible32(c).a)) - v1.AddArg2(v2, v4) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int32ToAuxInt(int32(32 - sdivisible32(c).k)) - v0.AddArg2(v1, v5) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int32ToAuxInt(int32(sdivisible32(c).max)) - v.AddArg2(v0, v6) - return true - } - } - } - break - } - // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 (Add32 mul:(Hmul32 (Const32 [m]) x) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) ) ) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m) && s == smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c) - // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int32(sdivisible32(c).m)]) x) (Const32 [int32(sdivisible32(c).a)]) ) (Const32 [int32(32-sdivisible32(c).k)]) ) (Const32 [int32(sdivisible32(c).max)]) ) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpMul32 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst32 { - continue - } - c := auxIntToInt32(v_1_0.AuxInt) - if v_1_1.Op != OpSub32 { - continue - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh32x64 { - continue - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAdd32 { - continue - } - _ = v_1_1_0_0.Args[1] - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - v_1_1_0_0_1 := v_1_1_0_0.Args[1] - for _i2 := 0; _i2 <= 1; _i2, v_1_1_0_0_0, v_1_1_0_0_1 = _i2+1, v_1_1_0_0_1, v_1_1_0_0_0 { - mul := v_1_1_0_0_0 - if mul.Op != OpHmul32 { - continue - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - mul_1 := mul.Args[1] - for _i3 := 0; _i3 <= 1; _i3, mul_0, mul_1 = _i3+1, mul_1, mul_0 { - if mul_0.Op != OpConst32 { - continue - } - m := auxIntToInt32(mul_0.AuxInt) - if x != mul_1 || x != v_1_1_0_0_1 { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - continue - } - s := 
auxIntToInt64(v_1_1_0_1.AuxInt) - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpRsh32x64 { - continue - } - _ = v_1_1_1.Args[1] - if x != v_1_1_1.Args[0] { - continue - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m) && s == smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c)) { - continue - } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) - v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v3.AuxInt = int32ToAuxInt(int32(sdivisible32(c).m)) - v2.AddArg2(v3, x) - v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v4.AuxInt = int32ToAuxInt(int32(sdivisible32(c).a)) - v1.AddArg2(v2, v4) - v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v5.AuxInt = int32ToAuxInt(int32(32 - sdivisible32(c).k)) - v0.AddArg2(v1, v5) - v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) - v6.AuxInt = int32ToAuxInt(int32(sdivisible32(c).max)) - v.AddArg2(v0, v6) - return true - } - } - } - } - break - } - // match: (Eq32 n (Lsh32x64 (Rsh32x64 (Add32 n (Rsh32Ux64 (Rsh32x64 n (Const64 [31])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) - // cond: k > 0 && k < 31 && kbar == 32 - k - // result: (Eq32 (And32 n (Const32 [1< [0])) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - n := v_0 - if v_1.Op != OpLsh32x64 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh32x64 { - continue - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAdd32 { - continue - } - t := v_1_0_0.Type - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - v_1_0_0_1 := v_1_0_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 { - if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh32Ux64 || v_1_0_0_1.Type != t { - continue - } - _ = v_1_0_0_1.Args[1] - v_1_0_0_1_0 := v_1_0_0_1.Args[0] - if v_1_0_0_1_0.Op != OpRsh32x64 || v_1_0_0_1_0.Type != t { - continue - } - _ = v_1_0_0_1_0.Args[1] - if n != v_1_0_0_1_0.Args[0] { - continue - } - v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1] - if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 31 { - continue - } - v_1_0_0_1_1 := v_1_0_0_1.Args[1] - if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 { - continue - } - kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt) - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { - continue - } - k := auxIntToInt64(v_1_0_1.AuxInt) - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 31 && kbar == 32-k) { - continue - } - v.reset(OpEq32) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v1 := b.NewValue0(v.Pos, OpConst32, t) - v1.AuxInt = int32ToAuxInt(1< (Mul64 (Const64 [int64(udivisible64(c).m)]) x) (Const64 [64-udivisible64(c).k]) ) (Const64 [int64(udivisible64(c).max)]) ) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpMul64 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst64 { - continue - } - c := auxIntToInt64(v_1_0.AuxInt) - if v_1_1.Op != OpRsh64Ux64 { - continue - } - _ = v_1_1.Args[1] - mul := v_1_1.Args[0] - if mul.Op != OpHmul64u { - continue - } - _ = mul.Args[1] - 
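The sdivisible32 rules nearby are the signed counterpart of the same idea. A sketch for odd divisors only, under the assumption that the generated rules cover even divisors by folding an extra rotate into the same comparison; the helper name is illustrative.

package main

import "fmt"

// With m = d^-1 mod 2^32, the map x -> x*m sends the signed multiples
// of d to a contiguous range of residues; adding a = 2^31/d recenters
// that range at zero, so x%d == 0 exactly when uint32(x)*m + a <= max,
// where max counts the multiples of d in the signed 32-bit range.
func signedDivisibleOdd(x, d int32) bool {
	m := uint32(d) // Newton iteration for the inverse of odd d mod 2^32
	for i := 0; i < 5; i++ {
		m *= 2 - uint32(d)*m
	}
	a := uint32(1) << 31 / uint32(d)
	max := a + (uint32(1)<<31-1)/uint32(d)
	return uint32(x)*m+a <= max
}

func main() {
	for x := int32(-1000); x <= 1000; x++ {
		if signedDivisibleOdd(x, 7) != (x%7 == 0) {
			fmt.Println("mismatch at", x)
		}
	}
	fmt.Println("ok")
}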
mul_0 := mul.Args[0] - mul_1 := mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { - if mul_0.Op != OpConst64 { - continue - } - m := auxIntToInt64(mul_0.AuxInt) - if x != mul_1 { - continue - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpConst64 { - continue - } - s := auxIntToInt64(v_1_1_1.AuxInt) - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic64(c).m/2) && s == umagic64(c).s-1 && x.Op != OpConst64 && udivisibleOK64(c)) { - continue - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64ToAuxInt(int64(udivisible64(c).m)) - v1.AddArg2(v2, x) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64ToAuxInt(64 - udivisible64(c).k) - v0.AddArg2(v1, v3) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64ToAuxInt(int64(udivisible64(c).max)) - v.AddArg2(v0, v4) - return true - } - } - } - break - } - // match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 mul:(Hmul64u (Const64 [m]) (Rsh64Ux64 x (Const64 [1]))) (Const64 [s])) ) ) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic64(c).m+1)/2) && s == umagic64(c).s-2 && x.Op != OpConst64 && udivisibleOK64(c) - // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible64(c).m)]) x) (Const64 [64-udivisible64(c).k]) ) (Const64 [int64(udivisible64(c).max)]) ) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpMul64 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst64 { - continue - } - c := auxIntToInt64(v_1_0.AuxInt) - if v_1_1.Op != OpRsh64Ux64 { - continue - } - _ = v_1_1.Args[1] - mul := v_1_1.Args[0] - if mul.Op != OpHmul64u { - continue - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - mul_1 := mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { - if mul_0.Op != OpConst64 { - continue - } - m := auxIntToInt64(mul_0.AuxInt) - if mul_1.Op != OpRsh64Ux64 { - continue - } - _ = mul_1.Args[1] - if x != mul_1.Args[0] { - continue - } - mul_1_1 := mul_1.Args[1] - if mul_1_1.Op != OpConst64 || auxIntToInt64(mul_1_1.AuxInt) != 1 { - continue - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpConst64 { - continue - } - s := auxIntToInt64(v_1_1_1.AuxInt) - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic64(c).m+1)/2) && s == umagic64(c).s-2 && x.Op != OpConst64 && udivisibleOK64(c)) { - continue - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64ToAuxInt(int64(udivisible64(c).m)) - v1.AddArg2(v2, x) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64ToAuxInt(64 - udivisible64(c).k) - v0.AddArg2(v1, v3) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64ToAuxInt(int64(udivisible64(c).max)) - v.AddArg2(v0, v4) - return true - } - } - } - break - } - // match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 (Avg64u x mul:(Hmul64u (Const64 [m]) x)) (Const64 [s])) ) ) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic64(c).m) && s == umagic64(c).s-1 && x.Op != OpConst64 && udivisibleOK64(c) - // result: (Leq64U (RotateLeft64 (Mul64 (Const64 
[int64(udivisible64(c).m)]) x) (Const64 [64-udivisible64(c).k]) ) (Const64 [int64(udivisible64(c).max)]) ) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpMul64 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst64 { - continue - } - c := auxIntToInt64(v_1_0.AuxInt) - if v_1_1.Op != OpRsh64Ux64 { - continue - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAvg64u { - continue - } - _ = v_1_1_0.Args[1] - if x != v_1_1_0.Args[0] { - continue - } - mul := v_1_1_0.Args[1] - if mul.Op != OpHmul64u { - continue - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - mul_1 := mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { - if mul_0.Op != OpConst64 { - continue - } - m := auxIntToInt64(mul_0.AuxInt) - if x != mul_1 { - continue - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpConst64 { - continue - } - s := auxIntToInt64(v_1_1_1.AuxInt) - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic64(c).m) && s == umagic64(c).s-1 && x.Op != OpConst64 && udivisibleOK64(c)) { - continue - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v2.AuxInt = int64ToAuxInt(int64(udivisible64(c).m)) - v1.AddArg2(v2, x) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64ToAuxInt(64 - udivisible64(c).k) - v0.AddArg2(v1, v3) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64ToAuxInt(int64(udivisible64(c).max)) - v.AddArg2(v0, v4) - return true - } - } - } - break - } - // match: (Eq64 x (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 mul:(Hmul64 (Const64 [m]) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) ) ) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m/2) && s == smagic64(c).s-1 && x.Op != OpConst64 && sdivisibleOK64(c) - // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible64(c).m)]) x) (Const64 [int64(sdivisible64(c).a)]) ) (Const64 [64-sdivisible64(c).k]) ) (Const64 [int64(sdivisible64(c).max)]) ) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpMul64 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst64 { - continue - } - c := auxIntToInt64(v_1_0.AuxInt) - if v_1_1.Op != OpSub64 { - continue - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh64x64 { - continue - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpHmul64 { - continue - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - mul_1 := mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { - if mul_0.Op != OpConst64 { - continue - } - m := auxIntToInt64(mul_0.AuxInt) - if x != mul_1 { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - continue - } - s := auxIntToInt64(v_1_1_0_1.AuxInt) - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpRsh64x64 { - continue - } - _ = v_1_1_1.Args[1] - if x != v_1_1_1.Args[0] { - continue - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m/2) && s == smagic64(c).s-1 && x.Op != OpConst64 && 
sdivisibleOK64(c)) { - continue - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64ToAuxInt(int64(sdivisible64(c).m)) - v2.AddArg2(v3, x) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64ToAuxInt(int64(sdivisible64(c).a)) - v1.AddArg2(v2, v4) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64ToAuxInt(64 - sdivisible64(c).k) - v0.AddArg2(v1, v5) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64ToAuxInt(int64(sdivisible64(c).max)) - v.AddArg2(v0, v6) - return true - } - } - } - break - } - // match: (Eq64 x (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 (Add64 mul:(Hmul64 (Const64 [m]) x) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) ) ) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m) && s == smagic64(c).s && x.Op != OpConst64 && sdivisibleOK64(c) - // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible64(c).m)]) x) (Const64 [int64(sdivisible64(c).a)]) ) (Const64 [64-sdivisible64(c).k]) ) (Const64 [int64(sdivisible64(c).max)]) ) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpMul64 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst64 { - continue - } - c := auxIntToInt64(v_1_0.AuxInt) - if v_1_1.Op != OpSub64 { - continue - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh64x64 { - continue - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAdd64 { - continue - } - _ = v_1_1_0_0.Args[1] - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - v_1_1_0_0_1 := v_1_1_0_0.Args[1] - for _i2 := 0; _i2 <= 1; _i2, v_1_1_0_0_0, v_1_1_0_0_1 = _i2+1, v_1_1_0_0_1, v_1_1_0_0_0 { - mul := v_1_1_0_0_0 - if mul.Op != OpHmul64 { - continue - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - mul_1 := mul.Args[1] - for _i3 := 0; _i3 <= 1; _i3, mul_0, mul_1 = _i3+1, mul_1, mul_0 { - if mul_0.Op != OpConst64 { - continue - } - m := auxIntToInt64(mul_0.AuxInt) - if x != mul_1 || x != v_1_1_0_0_1 { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - continue - } - s := auxIntToInt64(v_1_1_0_1.AuxInt) - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpRsh64x64 { - continue - } - _ = v_1_1_1.Args[1] - if x != v_1_1_1.Args[0] { - continue - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m) && s == smagic64(c).s && x.Op != OpConst64 && sdivisibleOK64(c)) { - continue - } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) - v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v3.AuxInt = int64ToAuxInt(int64(sdivisible64(c).m)) - v2.AddArg2(v3, x) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v4.AuxInt = int64ToAuxInt(int64(sdivisible64(c).a)) - v1.AddArg2(v2, v4) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v5.AuxInt = int64ToAuxInt(64 - sdivisible64(c).k) - v0.AddArg2(v1, v5) - v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v6.AuxInt = int64ToAuxInt(int64(sdivisible64(c).max)) - v.AddArg2(v0, v6) - return true - } - } - } - } - break 
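For context on why these Eq64/Eq32 match trees are so deep: x%c == 0 is first rewritten to x == c*(x/c), and the constant division is itself expanded into a magic-number multiply and shift, whose Hmul/Avg/shift shapes are what the patterns above recognize before collapsing the whole comparison into the rotate-and-compare form. A small check of the underlying division identity, using the standard constant for 32-bit unsigned division by 3 (not read from this diff):

package main

import "fmt"

func main() {
	// M = ceil(2^33 / 3); for every uint32 x, x/3 == (x*M) >> 33.
	const M = 0xAAAAAAAB
	for _, x := range []uint32{0, 1, 2, 3, 1000, 1<<32 - 1} {
		if uint32(uint64(x)*M>>33) != x/3 {
			fmt.Println("mismatch", x)
		}
	}
	fmt.Println("ok")
}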
- } - // match: (Eq64 n (Lsh64x64 (Rsh64x64 (Add64 n (Rsh64Ux64 (Rsh64x64 n (Const64 [63])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) - // cond: k > 0 && k < 63 && kbar == 64 - k - // result: (Eq64 (And64 n (Const64 [1< [0])) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - n := v_0 - if v_1.Op != OpLsh64x64 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh64x64 { - continue - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAdd64 { - continue - } - t := v_1_0_0.Type - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - v_1_0_0_1 := v_1_0_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 { - if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh64Ux64 || v_1_0_0_1.Type != t { - continue - } - _ = v_1_0_0_1.Args[1] - v_1_0_0_1_0 := v_1_0_0_1.Args[0] - if v_1_0_0_1_0.Op != OpRsh64x64 || v_1_0_0_1_0.Type != t { - continue - } - _ = v_1_0_0_1_0.Args[1] - if n != v_1_0_0_1_0.Args[0] { - continue - } - v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1] - if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 63 { - continue - } - v_1_0_0_1_1 := v_1_0_0_1.Args[1] - if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 { - continue - } - kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt) - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { - continue - } - k := auxIntToInt64(v_1_0_1.AuxInt) - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 63 && kbar == 64-k) { - continue - } - v.reset(OpEq64) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = int64ToAuxInt(1< (Mul8 (Const8 [int8(udivisible8(c).m)]) x) (Const8 [int8(8-udivisible8(c).k)]) ) (Const8 [int8(udivisible8(c).max)]) ) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpMul8 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst8 { - continue - } - c := auxIntToInt8(v_1_0.AuxInt) - if v_1_1.Op != OpTrunc32to8 { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh32Ux64 { - continue - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul32 { - continue - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - mul_1 := mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { - if mul_0.Op != OpConst32 { - continue - } - m := auxIntToInt32(mul_0.AuxInt) - if mul_1.Op != OpZeroExt8to32 || x != mul_1.Args[0] { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - continue - } - s := auxIntToInt64(v_1_1_0_1.AuxInt) - if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<8+umagic8(c).m) && s == 8+umagic8(c).s && x.Op != OpConst8 && udivisibleOK8(c)) { - continue - } - v.reset(OpLeq8U) - v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8) - v1 := b.NewValue0(v.Pos, OpMul8, typ.UInt8) - v2 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v2.AuxInt = int8ToAuxInt(int8(udivisible8(c).m)) - v1.AddArg2(v2, x) - v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v3.AuxInt = int8ToAuxInt(int8(8 - udivisible8(c).k)) - v0.AddArg2(v1, v3) - v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v4.AuxInt = int8ToAuxInt(int8(udivisible8(c).max)) - v.AddArg2(v0, v4) - return true - } - } - } - break - } - // match: 
(Eq8 x (Mul8 (Const8 [c]) (Sub8 (Rsh32x64 mul:(Mul32 (Const32 [m]) (SignExt8to32 x)) (Const64 [s])) (Rsh32x64 (SignExt8to32 x) (Const64 [31]))) ) ) - // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic8(c).m) && s == 8+smagic8(c).s && x.Op != OpConst8 && sdivisibleOK8(c) - // result: (Leq8U (RotateLeft8 (Add8 (Mul8 (Const8 [int8(sdivisible8(c).m)]) x) (Const8 [int8(sdivisible8(c).a)]) ) (Const8 [int8(8-sdivisible8(c).k)]) ) (Const8 [int8(sdivisible8(c).max)]) ) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpMul8 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst8 { - continue - } - c := auxIntToInt8(v_1_0.AuxInt) - if v_1_1.Op != OpSub8 { - continue - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpRsh32x64 { - continue - } - _ = v_1_1_0.Args[1] - mul := v_1_1_0.Args[0] - if mul.Op != OpMul32 { - continue - } - _ = mul.Args[1] - mul_0 := mul.Args[0] - mul_1 := mul.Args[1] - for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { - if mul_0.Op != OpConst32 { - continue - } - m := auxIntToInt32(mul_0.AuxInt) - if mul_1.Op != OpSignExt8to32 || x != mul_1.Args[0] { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpConst64 { - continue - } - s := auxIntToInt64(v_1_1_0_1.AuxInt) - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpRsh32x64 { - continue - } - _ = v_1_1_1.Args[1] - v_1_1_1_0 := v_1_1_1.Args[0] - if v_1_1_1_0.Op != OpSignExt8to32 || x != v_1_1_1_0.Args[0] { - continue - } - v_1_1_1_1 := v_1_1_1.Args[1] - if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic8(c).m) && s == 8+smagic8(c).s && x.Op != OpConst8 && sdivisibleOK8(c)) { - continue - } - v.reset(OpLeq8U) - v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8) - v1 := b.NewValue0(v.Pos, OpAdd8, typ.UInt8) - v2 := b.NewValue0(v.Pos, OpMul8, typ.UInt8) - v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v3.AuxInt = int8ToAuxInt(int8(sdivisible8(c).m)) - v2.AddArg2(v3, x) - v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v4.AuxInt = int8ToAuxInt(int8(sdivisible8(c).a)) - v1.AddArg2(v2, v4) - v5 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v5.AuxInt = int8ToAuxInt(int8(8 - sdivisible8(c).k)) - v0.AddArg2(v1, v5) - v6 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) - v6.AuxInt = int8ToAuxInt(int8(sdivisible8(c).max)) - v.AddArg2(v0, v6) - return true - } - } - } - break - } - // match: (Eq8 n (Lsh8x64 (Rsh8x64 (Add8 n (Rsh8Ux64 (Rsh8x64 n (Const64 [ 7])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) - // cond: k > 0 && k < 7 && kbar == 8 - k - // result: (Eq8 (And8 n (Const8 [1< [0])) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - n := v_0 - if v_1.Op != OpLsh8x64 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh8x64 { - continue - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAdd8 { - continue - } - t := v_1_0_0.Type - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - v_1_0_0_1 := v_1_0_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 { - if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh8Ux64 || v_1_0_0_1.Type != t { - continue - } - _ = v_1_0_0_1.Args[1] - v_1_0_0_1_0 := v_1_0_0_1.Args[0] - if v_1_0_0_1_0.Op != OpRsh8x64 || v_1_0_0_1_0.Type != t { - continue - } - _ = v_1_0_0_1_0.Args[1] 
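The Lsh/Rsh pattern matched just above is how "n % (1<<k) == 0" looks for signed n after front-end lowering, and the rewrite replaces it with a single mask test. A quick exhaustive check of the equivalence over int8:

package main

import "fmt"

func main() {
	for k := uint(1); k < 7; k++ { // the rule requires 0 < k < 7
		for i := -128; i <= 127; i++ {
			n := int8(i)
			// bias is 1<<k - 1 for negative n and 0 otherwise, so the
			// shift round trip rounds n toward zero to a multiple of 2^k;
			// n equals the round trip exactly when 2^k divides n.
			bias := int8(uint8(n>>7) >> (8 - k))
			roundTrip := ((n + bias) >> k) << k
			masked := uint8(n)&(uint8(1)<<k-1) == 0
			if (n == roundTrip) != masked {
				fmt.Println("mismatch", k, n)
			}
		}
	}
	fmt.Println("ok")
}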
- if n != v_1_0_0_1_0.Args[0] { - continue - } - v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1] - if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 7 { - continue - } - v_1_0_0_1_1 := v_1_0_0_1.Args[1] - if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 { - continue - } - kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt) - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { - continue - } - k := auxIntToInt64(v_1_0_1.AuxInt) - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 7 && kbar == 8-k) { - continue - } - v.reset(OpEq8) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v1 := b.NewValue0(v.Pos, OpConst8, t) - v1.AuxInt = int8ToAuxInt(1< x (Const16 [c])) - // cond: x.Op != OpConst16 && c > 0 && umagicOK16(c) + // cond: x.Op != OpConst16 && c != 0 // result: (Sub16 x (Mul16 (Div16u x (Const16 [c])) (Const16 [c]))) for { t := v.Type @@ -16790,7 +14733,7 @@ func rewriteValuegeneric_OpMod16u(v *Value) bool { break } c := auxIntToInt16(v_1.AuxInt) - if !(x.Op != OpConst16 && c > 0 && umagicOK16(c)) { + if !(x.Op != OpConst16 && c != 0) { break } v.reset(OpSub16) @@ -16935,7 +14878,7 @@ func rewriteValuegeneric_OpMod32u(v *Value) bool { return true } // match: (Mod32u x (Const32 [c])) - // cond: x.Op != OpConst32 && c > 0 && umagicOK32(c) + // cond: x.Op != OpConst32 && c != 0 // result: (Sub32 x (Mul32 (Div32u x (Const32 [c])) (Const32 [c]))) for { t := v.Type @@ -16944,7 +14887,7 @@ func rewriteValuegeneric_OpMod32u(v *Value) bool { break } c := auxIntToInt32(v_1.AuxInt) - if !(x.Op != OpConst32 && c > 0 && umagicOK32(c)) { + if !(x.Op != OpConst32 && c != 0) { break } v.reset(OpSub32) @@ -17100,7 +15043,7 @@ func rewriteValuegeneric_OpMod64u(v *Value) bool { return true } // match: (Mod64u x (Const64 [c])) - // cond: x.Op != OpConst64 && c > 0 && umagicOK64(c) + // cond: x.Op != OpConst64 && c != 0 // result: (Sub64 x (Mul64 (Div64u x (Const64 [c])) (Const64 [c]))) for { t := v.Type @@ -17109,7 +15052,7 @@ func rewriteValuegeneric_OpMod64u(v *Value) bool { break } c := auxIntToInt64(v_1.AuxInt) - if !(x.Op != OpConst64 && c > 0 && umagicOK64(c)) { + if !(x.Op != OpConst64 && c != 0) { break } v.reset(OpSub64) @@ -17254,7 +15197,7 @@ func rewriteValuegeneric_OpMod8u(v *Value) bool { return true } // match: (Mod8u x (Const8 [c])) - // cond: x.Op != OpConst8 && c > 0 && umagicOK8( c) + // cond: x.Op != OpConst8 && c != 0 // result: (Sub8 x (Mul8 (Div8u x (Const8 [c])) (Const8 [c]))) for { t := v.Type @@ -17263,7 +15206,7 @@ func rewriteValuegeneric_OpMod8u(v *Value) bool { break } c := auxIntToInt8(v_1.AuxInt) - if !(x.Op != OpConst8 && c > 0 && umagicOK8(c)) { + if !(x.Op != OpConst8 && c != 0) { break } v.reset(OpSub8) @@ -18667,54 +16610,8 @@ func rewriteValuegeneric_OpMul16(v *Value) bool { } break } - // match: (Mul16 n (Const16 [c])) - // cond: isPowerOfTwo(c) - // result: (Lsh16x64 n (Const64 [log16(c)])) - for { - t := v.Type - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - n := v_0 - if v_1.Op != OpConst16 { - continue - } - c := auxIntToInt16(v_1.AuxInt) - if !(isPowerOfTwo(c)) { - continue - } - v.reset(OpLsh16x64) - v.Type = t - v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v0.AuxInt = int64ToAuxInt(log16(c)) - v.AddArg2(n, v0) - return true - } - break - } - // match: (Mul16 n (Const16 [c])) - // cond: t.IsSigned() && isPowerOfTwo(-c) - // result: (Neg16 (Lsh16x64 n (Const64 [log16(-c)]))) - for { - t 
:= v.Type - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - n := v_0 - if v_1.Op != OpConst16 { - continue - } - c := auxIntToInt16(v_1.AuxInt) - if !(t.IsSigned() && isPowerOfTwo(-c)) { - continue - } - v.reset(OpNeg16) - v0 := b.NewValue0(v.Pos, OpLsh16x64, t) - v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v1.AuxInt = int64ToAuxInt(log16(-c)) - v0.AddArg2(n, v1) - v.AddArg(v0) - return true - } - break - } // match: (Mul16 (Const16 [c]) (Add16 (Const16 [d]) x)) + // cond: !isPowerOfTwo(c) // result: (Add16 (Const16 [c*d]) (Mul16 (Const16 [c]) x)) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -18735,6 +16632,9 @@ func rewriteValuegeneric_OpMul16(v *Value) bool { } d := auxIntToInt16(v_1_0.AuxInt) x := v_1_1 + if !(!isPowerOfTwo(c)) { + continue + } v.reset(OpAdd16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int16ToAuxInt(c * d) @@ -18761,6 +16661,53 @@ func rewriteValuegeneric_OpMul16(v *Value) bool { } break } + // match: (Mul16 x (Const16 [c])) + // cond: isPowerOfTwo(c) && v.Block.Func.pass.name != "opt" + // result: (Lsh16x64 x (Const64 [log16(c)])) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1.AuxInt) + if !(isPowerOfTwo(c) && v.Block.Func.pass.name != "opt") { + continue + } + v.reset(OpLsh16x64) + v.Type = t + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(log16(c)) + v.AddArg2(x, v0) + return true + } + break + } + // match: (Mul16 x (Const16 [c])) + // cond: t.IsSigned() && isPowerOfTwo(-c) && v.Block.Func.pass.name != "opt" + // result: (Neg16 (Lsh16x64 x (Const64 [log16(-c)]))) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1.AuxInt) + if !(t.IsSigned() && isPowerOfTwo(-c) && v.Block.Func.pass.name != "opt") { + continue + } + v.reset(OpNeg16) + v0 := b.NewValue0(v.Pos, OpLsh16x64, t) + v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v1.AuxInt = int64ToAuxInt(log16(-c)) + v0.AddArg2(x, v1) + v.AddArg(v0) + return true + } + break + } // match: (Mul16 (Mul16 i:(Const16 ) z) x) // cond: (z.Op != OpConst16 && x.Op != OpConst16) // result: (Mul16 i (Mul16 x z)) @@ -18874,54 +16821,8 @@ func rewriteValuegeneric_OpMul32(v *Value) bool { } break } - // match: (Mul32 n (Const32 [c])) - // cond: isPowerOfTwo(c) - // result: (Lsh32x64 n (Const64 [log32(c)])) - for { - t := v.Type - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - n := v_0 - if v_1.Op != OpConst32 { - continue - } - c := auxIntToInt32(v_1.AuxInt) - if !(isPowerOfTwo(c)) { - continue - } - v.reset(OpLsh32x64) - v.Type = t - v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v0.AuxInt = int64ToAuxInt(log32(c)) - v.AddArg2(n, v0) - return true - } - break - } - // match: (Mul32 n (Const32 [c])) - // cond: t.IsSigned() && isPowerOfTwo(-c) - // result: (Neg32 (Lsh32x64 n (Const64 [log32(-c)]))) - for { - t := v.Type - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - n := v_0 - if v_1.Op != OpConst32 { - continue - } - c := auxIntToInt32(v_1.AuxInt) - if !(t.IsSigned() && isPowerOfTwo(-c)) { - continue - } - v.reset(OpNeg32) - v0 := b.NewValue0(v.Pos, OpLsh32x64, t) - v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v1.AuxInt = int64ToAuxInt(log32(-c)) - v0.AddArg2(n, v1) - v.AddArg(v0) - return true - } - break - } // match: (Mul32 (Const32 [c]) (Add32 (Const32 [d]) x)) + // cond: !isPowerOfTwo(c) // result: 
(Add32 (Const32 [c*d]) (Mul32 (Const32 [c]) x)) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -18942,6 +16843,9 @@ func rewriteValuegeneric_OpMul32(v *Value) bool { } d := auxIntToInt32(v_1_0.AuxInt) x := v_1_1 + if !(!isPowerOfTwo(c)) { + continue + } v.reset(OpAdd32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int32ToAuxInt(c * d) @@ -18968,6 +16872,53 @@ func rewriteValuegeneric_OpMul32(v *Value) bool { } break } + // match: (Mul32 x (Const32 [c])) + // cond: isPowerOfTwo(c) && v.Block.Func.pass.name != "opt" + // result: (Lsh32x64 x (Const64 [log32(c)])) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1.AuxInt) + if !(isPowerOfTwo(c) && v.Block.Func.pass.name != "opt") { + continue + } + v.reset(OpLsh32x64) + v.Type = t + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(log32(c)) + v.AddArg2(x, v0) + return true + } + break + } + // match: (Mul32 x (Const32 [c])) + // cond: t.IsSigned() && isPowerOfTwo(-c) && v.Block.Func.pass.name != "opt" + // result: (Neg32 (Lsh32x64 x (Const64 [log32(-c)]))) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1.AuxInt) + if !(t.IsSigned() && isPowerOfTwo(-c) && v.Block.Func.pass.name != "opt") { + continue + } + v.reset(OpNeg32) + v0 := b.NewValue0(v.Pos, OpLsh32x64, t) + v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v1.AuxInt = int64ToAuxInt(log32(-c)) + v0.AddArg2(x, v1) + v.AddArg(v0) + return true + } + break + } // match: (Mul32 (Mul32 i:(Const32 ) z) x) // cond: (z.Op != OpConst32 && x.Op != OpConst32) // result: (Mul32 i (Mul32 x z)) @@ -19242,54 +17193,8 @@ func rewriteValuegeneric_OpMul64(v *Value) bool { } break } - // match: (Mul64 n (Const64 [c])) - // cond: isPowerOfTwo(c) - // result: (Lsh64x64 n (Const64 [log64(c)])) - for { - t := v.Type - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - n := v_0 - if v_1.Op != OpConst64 { - continue - } - c := auxIntToInt64(v_1.AuxInt) - if !(isPowerOfTwo(c)) { - continue - } - v.reset(OpLsh64x64) - v.Type = t - v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v0.AuxInt = int64ToAuxInt(log64(c)) - v.AddArg2(n, v0) - return true - } - break - } - // match: (Mul64 n (Const64 [c])) - // cond: t.IsSigned() && isPowerOfTwo(-c) - // result: (Neg64 (Lsh64x64 n (Const64 [log64(-c)]))) - for { - t := v.Type - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - n := v_0 - if v_1.Op != OpConst64 { - continue - } - c := auxIntToInt64(v_1.AuxInt) - if !(t.IsSigned() && isPowerOfTwo(-c)) { - continue - } - v.reset(OpNeg64) - v0 := b.NewValue0(v.Pos, OpLsh64x64, t) - v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v1.AuxInt = int64ToAuxInt(log64(-c)) - v0.AddArg2(n, v1) - v.AddArg(v0) - return true - } - break - } // match: (Mul64 (Const64 [c]) (Add64 (Const64 [d]) x)) + // cond: !isPowerOfTwo(c) // result: (Add64 (Const64 [c*d]) (Mul64 (Const64 [c]) x)) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -19310,6 +17215,9 @@ func rewriteValuegeneric_OpMul64(v *Value) bool { } d := auxIntToInt64(v_1_0.AuxInt) x := v_1_1 + if !(!isPowerOfTwo(c)) { + continue + } v.reset(OpAdd64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64ToAuxInt(c * d) @@ -19336,6 +17244,53 @@ func rewriteValuegeneric_OpMul64(v *Value) bool { } break } + // match: (Mul64 x (Const64 [c])) + // cond: isPowerOfTwo(c) && 
v.Block.Func.pass.name != "opt" + // result: (Lsh64x64 x (Const64 [log64(c)])) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo(c) && v.Block.Func.pass.name != "opt") { + continue + } + v.reset(OpLsh64x64) + v.Type = t + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(log64(c)) + v.AddArg2(x, v0) + return true + } + break + } + // match: (Mul64 x (Const64 [c])) + // cond: t.IsSigned() && isPowerOfTwo(-c) && v.Block.Func.pass.name != "opt" + // result: (Neg64 (Lsh64x64 x (Const64 [log64(-c)]))) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(t.IsSigned() && isPowerOfTwo(-c) && v.Block.Func.pass.name != "opt") { + continue + } + v.reset(OpNeg64) + v0 := b.NewValue0(v.Pos, OpLsh64x64, t) + v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v1.AuxInt = int64ToAuxInt(log64(-c)) + v0.AddArg2(x, v1) + v.AddArg(v0) + return true + } + break + } // match: (Mul64 (Mul64 i:(Const64 ) z) x) // cond: (z.Op != OpConst64 && x.Op != OpConst64) // result: (Mul64 i (Mul64 x z)) @@ -19610,54 +17565,8 @@ func rewriteValuegeneric_OpMul8(v *Value) bool { } break } - // match: (Mul8 n (Const8 [c])) - // cond: isPowerOfTwo(c) - // result: (Lsh8x64 n (Const64 [log8(c)])) - for { - t := v.Type - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - n := v_0 - if v_1.Op != OpConst8 { - continue - } - c := auxIntToInt8(v_1.AuxInt) - if !(isPowerOfTwo(c)) { - continue - } - v.reset(OpLsh8x64) - v.Type = t - v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v0.AuxInt = int64ToAuxInt(log8(c)) - v.AddArg2(n, v0) - return true - } - break - } - // match: (Mul8 n (Const8 [c])) - // cond: t.IsSigned() && isPowerOfTwo(-c) - // result: (Neg8 (Lsh8x64 n (Const64 [log8(-c)]))) - for { - t := v.Type - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - n := v_0 - if v_1.Op != OpConst8 { - continue - } - c := auxIntToInt8(v_1.AuxInt) - if !(t.IsSigned() && isPowerOfTwo(-c)) { - continue - } - v.reset(OpNeg8) - v0 := b.NewValue0(v.Pos, OpLsh8x64, t) - v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v1.AuxInt = int64ToAuxInt(log8(-c)) - v0.AddArg2(n, v1) - v.AddArg(v0) - return true - } - break - } // match: (Mul8 (Const8 [c]) (Add8 (Const8 [d]) x)) + // cond: !isPowerOfTwo(c) // result: (Add8 (Const8 [c*d]) (Mul8 (Const8 [c]) x)) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -19678,6 +17587,9 @@ func rewriteValuegeneric_OpMul8(v *Value) bool { } d := auxIntToInt8(v_1_0.AuxInt) x := v_1_1 + if !(!isPowerOfTwo(c)) { + continue + } v.reset(OpAdd8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int8ToAuxInt(c * d) @@ -19704,6 +17616,53 @@ func rewriteValuegeneric_OpMul8(v *Value) bool { } break } + // match: (Mul8 x (Const8 [c])) + // cond: isPowerOfTwo(c) && v.Block.Func.pass.name != "opt" + // result: (Lsh8x64 x (Const64 [log8(c)])) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1.AuxInt) + if !(isPowerOfTwo(c) && v.Block.Func.pass.name != "opt") { + continue + } + v.reset(OpLsh8x64) + v.Type = t + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(log8(c)) + v.AddArg2(x, v0) + return true + } + break + } + // match: (Mul8 x (Const8 [c])) + // cond: t.IsSigned() && 
isPowerOfTwo(-c) && v.Block.Func.pass.name != "opt" + // result: (Neg8 (Lsh8x64 x (Const64 [log8(-c)]))) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1.AuxInt) + if !(t.IsSigned() && isPowerOfTwo(-c) && v.Block.Func.pass.name != "opt") { + continue + } + v.reset(OpNeg8) + v0 := b.NewValue0(v.Pos, OpLsh8x64, t) + v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v1.AuxInt = int64ToAuxInt(log8(-c)) + v0.AddArg2(x, v1) + v.AddArg(v0) + return true + } + break + } // match: (Mul8 (Mul8 i:(Const8 ) z) x) // cond: (z.Op != OpConst8 && x.Op != OpConst8) // result: (Mul8 i (Mul8 x z)) @@ -20017,7 +17976,6 @@ func rewriteValuegeneric_OpNeq16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types // match: (Neq16 x x) // result: (ConstBool [false]) for { @@ -20077,73 +18035,6 @@ func rewriteValuegeneric_OpNeq16(v *Value) bool { } break } - // match: (Neq16 n (Lsh16x64 (Rsh16x64 (Add16 n (Rsh16Ux64 (Rsh16x64 n (Const64 [15])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) - // cond: k > 0 && k < 15 && kbar == 16 - k - // result: (Neq16 (And16 n (Const16 [1< [0])) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - n := v_0 - if v_1.Op != OpLsh16x64 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh16x64 { - continue - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAdd16 { - continue - } - t := v_1_0_0.Type - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - v_1_0_0_1 := v_1_0_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 { - if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh16Ux64 || v_1_0_0_1.Type != t { - continue - } - _ = v_1_0_0_1.Args[1] - v_1_0_0_1_0 := v_1_0_0_1.Args[0] - if v_1_0_0_1_0.Op != OpRsh16x64 || v_1_0_0_1_0.Type != t { - continue - } - _ = v_1_0_0_1_0.Args[1] - if n != v_1_0_0_1_0.Args[0] { - continue - } - v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1] - if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 15 { - continue - } - v_1_0_0_1_1 := v_1_0_0_1.Args[1] - if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 { - continue - } - kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt) - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { - continue - } - k := auxIntToInt64(v_1_0_1.AuxInt) - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 15 && kbar == 16-k) { - continue - } - v.reset(OpNeq16) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v1 := b.NewValue0(v.Pos, OpConst16, t) - v1.AuxInt = int16ToAuxInt(1< n (Rsh32Ux64 (Rsh32x64 n (Const64 [31])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) - // cond: k > 0 && k < 31 && kbar == 32 - k - // result: (Neq32 (And32 n (Const32 [1< [0])) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - n := v_0 - if v_1.Op != OpLsh32x64 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh32x64 { - continue - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAdd32 { - continue - } - t := v_1_0_0.Type - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - v_1_0_0_1 := v_1_0_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 { - if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh32Ux64 || v_1_0_0_1.Type != t { - continue - } - _ = 
v_1_0_0_1.Args[1] - v_1_0_0_1_0 := v_1_0_0_1.Args[0] - if v_1_0_0_1_0.Op != OpRsh32x64 || v_1_0_0_1_0.Type != t { - continue - } - _ = v_1_0_0_1_0.Args[1] - if n != v_1_0_0_1_0.Args[0] { - continue - } - v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1] - if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 31 { - continue - } - v_1_0_0_1_1 := v_1_0_0_1.Args[1] - if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 { - continue - } - kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt) - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { - continue - } - k := auxIntToInt64(v_1_0_1.AuxInt) - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 31 && kbar == 32-k) { - continue - } - v.reset(OpNeq32) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v1 := b.NewValue0(v.Pos, OpConst32, t) - v1.AuxInt = int32ToAuxInt(1< n (Rsh64Ux64 (Rsh64x64 n (Const64 [63])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) - // cond: k > 0 && k < 63 && kbar == 64 - k - // result: (Neq64 (And64 n (Const64 [1< [0])) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - n := v_0 - if v_1.Op != OpLsh64x64 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh64x64 { - continue - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAdd64 { - continue - } - t := v_1_0_0.Type - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - v_1_0_0_1 := v_1_0_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 { - if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh64Ux64 || v_1_0_0_1.Type != t { - continue - } - _ = v_1_0_0_1.Args[1] - v_1_0_0_1_0 := v_1_0_0_1.Args[0] - if v_1_0_0_1_0.Op != OpRsh64x64 || v_1_0_0_1_0.Type != t { - continue - } - _ = v_1_0_0_1_0.Args[1] - if n != v_1_0_0_1_0.Args[0] { - continue - } - v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1] - if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 63 { - continue - } - v_1_0_0_1_1 := v_1_0_0_1.Args[1] - if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 { - continue - } - kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt) - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { - continue - } - k := auxIntToInt64(v_1_0_1.AuxInt) - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 63 && kbar == 64-k) { - continue - } - v.reset(OpNeq64) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = int64ToAuxInt(1< n (Rsh8Ux64 (Rsh8x64 n (Const64 [ 7])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) - // cond: k > 0 && k < 7 && kbar == 8 - k - // result: (Neq8 (And8 n (Const8 [1< [0])) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - n := v_0 - if v_1.Op != OpLsh8x64 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpRsh8x64 { - continue - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpAdd8 { - continue - } - t := v_1_0_0.Type - _ = v_1_0_0.Args[1] - v_1_0_0_0 := v_1_0_0.Args[0] - v_1_0_0_1 := v_1_0_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 { - if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh8Ux64 || v_1_0_0_1.Type != t { - continue - } - _ = v_1_0_0_1.Args[1] - v_1_0_0_1_0 := v_1_0_0_1.Args[0] - if v_1_0_0_1_0.Op != 
OpRsh8x64 || v_1_0_0_1_0.Type != t { - continue - } - _ = v_1_0_0_1_0.Args[1] - if n != v_1_0_0_1_0.Args[0] { - continue - } - v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1] - if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 7 { - continue - } - v_1_0_0_1_1 := v_1_0_0_1.Args[1] - if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 { - continue - } - kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt) - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { - continue - } - k := auxIntToInt64(v_1_0_1.AuxInt) - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 7 && kbar == 8-k) { - continue - } - v.reset(OpNeq8) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v1 := b.NewValue0(v.Pos, OpConst8, t) - v1.AuxInt = int8ToAuxInt(1<= d // result: (Less64U (Const64 [c-d]) (Sub64 x (Const64 [d]))) @@ -25269,6 +23242,558 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { } break } + // match: (OrB (Neq64F x x) (Less64F x y:(Const64F [c]))) + // result: (Not (Leq64F y x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess64F { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst64F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq64F, typ.Bool) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Leq64F x y:(Const64F [c]))) + // result: (Not (Less64F y x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq64F { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst64F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess64F, typ.Bool) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Less64F y:(Const64F [c]) x)) + // result: (Not (Leq64F x y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess64F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst64F { + continue + } + if x != v_1.Args[1] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq64F, typ.Bool) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Leq64F y:(Const64F [c]) x)) + // result: (Not (Less64F x y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq64F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst64F { + continue + } + if x != v_1.Args[1] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess64F, typ.Bool) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq32F x x) (Less32F x y:(Const32F [c]))) + // result: (Not (Leq32F y x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq32F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess32F { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst32F { + continue + } + 
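The Neq8/Neq32/Neq64 matchers that end here all recognize the expanded, strength-reduced form of a signed remainder by a power of two and replace the whole comparison with a single mask test: for signed n, n % (1<<k) != 0 exactly when the low k bits of n are nonzero. A minimal standalone sketch of that identity (plain Go, not the compiler's generated code):

package main

import "fmt"

// remNonZero is the form the source program writes.
func remNonZero(n int32, k uint) bool {
	return n%(1<<k) != 0
}

// maskNonZero is the rewritten form: test the low k bits directly.
func maskNonZero(n int32, k uint) bool {
	return n&(1<<k-1) != 0
}

func main() {
	for _, n := range []int32{-9, -8, -1, 0, 1, 7, 8, 9} {
		if remNonZero(n, 3) != maskNonZero(n, 3) {
			panic("identity violated")
		}
	}
	fmt.Println("n%8 != 0 agrees with n&7 != 0 for all samples")
}

The identity holds for negative n as well because Go's % truncates toward zero, so n%(1<<k) is zero exactly when the low k bits are zero.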
v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq32F, typ.Bool) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq32F x x) (Leq32F x y:(Const32F [c]))) + // result: (Not (Less32F y x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq32F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq32F { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst32F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess32F, typ.Bool) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq32F x x) (Less32F y:(Const32F [c]) x)) + // result: (Not (Leq32F x y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq32F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess32F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst32F { + continue + } + if x != v_1.Args[1] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq32F, typ.Bool) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq32F x x) (Leq32F y:(Const32F [c]) x)) + // result: (Not (Less32F x y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq32F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq32F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst32F { + continue + } + if x != v_1.Args[1] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess32F, typ.Bool) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Less64F abs:(Abs x) y:(Const64F [c]))) + // result: (Not (Leq64F y abs)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess64F { + continue + } + _ = v_1.Args[1] + abs := v_1.Args[0] + if abs.Op != OpAbs || x != abs.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst64F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq64F, typ.Bool) + v0.AddArg2(y, abs) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Leq64F abs:(Abs x) y:(Const64F [c]))) + // result: (Not (Less64F y abs)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq64F { + continue + } + _ = v_1.Args[1] + abs := v_1.Args[0] + if abs.Op != OpAbs || x != abs.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst64F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess64F, typ.Bool) + v0.AddArg2(y, abs) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Less64F y:(Const64F [c]) abs:(Abs x))) + // result: (Not (Leq64F abs y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess64F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst64F { + continue + } + abs := v_1.Args[1] + if abs.Op != OpAbs || x != abs.Args[0] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq64F, typ.Bool) + v0.AddArg2(abs, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Leq64F y:(Const64F [c]) abs:(Abs x))) + // result: (Not (Less64F abs y)) 
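The OrB rules in this region fold an explicit NaN check into the comparison itself: x != x holds only when x is NaN, and NaN fails every ordered comparison, so (x != x) || (x < c) is equivalent to !(c <= x), and likewise for the Leq, Abs, and Neg variants that follow. A small sketch verifying the core identity, assuming float64 and a constant c:

package main

import (
	"fmt"
	"math"
)

// unfused is the source-level form the matcher recognizes.
func unfused(x, c float64) bool { return x != x || x < c }

// fused is the rewritten form: one comparison, NaN handled for free.
func fused(x, c float64) bool { return !(c <= x) }

func main() {
	const c = 1.5
	for _, x := range []float64{math.NaN(), math.Inf(-1), 0, 1.5, 2, math.Inf(1)} {
		if unfused(x, c) != fused(x, c) {
			panic("identity violated")
		}
	}
	fmt.Println("x != x || x < c agrees with !(c <= x)")
}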
+ for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq64F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst64F { + continue + } + abs := v_1.Args[1] + if abs.Op != OpAbs || x != abs.Args[0] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess64F, typ.Bool) + v0.AddArg2(abs, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Less64F neg:(Neg64F x) y:(Const64F [c]))) + // result: (Not (Leq64F y neg)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess64F { + continue + } + _ = v_1.Args[1] + neg := v_1.Args[0] + if neg.Op != OpNeg64F || x != neg.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst64F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq64F, typ.Bool) + v0.AddArg2(y, neg) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Leq64F neg:(Neg64F x) y:(Const64F [c]))) + // result: (Not (Less64F y neg)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq64F { + continue + } + _ = v_1.Args[1] + neg := v_1.Args[0] + if neg.Op != OpNeg64F || x != neg.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst64F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess64F, typ.Bool) + v0.AddArg2(y, neg) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Less64F y:(Const64F [c]) neg:(Neg64F x))) + // result: (Not (Leq64F neg y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess64F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst64F { + continue + } + neg := v_1.Args[1] + if neg.Op != OpNeg64F || x != neg.Args[0] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq64F, typ.Bool) + v0.AddArg2(neg, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Leq64F y:(Const64F [c]) neg:(Neg64F x))) + // result: (Not (Less64F neg y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq64F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst64F { + continue + } + neg := v_1.Args[1] + if neg.Op != OpNeg64F || x != neg.Args[0] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess64F, typ.Bool) + v0.AddArg2(neg, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq32F x x) (Less32F neg:(Neg32F x) y:(Const32F [c]))) + // result: (Not (Leq32F y neg)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq32F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess32F { + continue + } + _ = v_1.Args[1] + neg := v_1.Args[0] + if neg.Op != OpNeg32F || x != neg.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst32F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq32F, typ.Bool) + v0.AddArg2(y, neg) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq32F x x) (Leq32F neg:(Neg32F x) y:(Const32F [c]))) + // result: (Not (Less32F y neg)) + for { + for _i0 := 0; _i0 <= 1; 
_i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq32F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq32F { + continue + } + _ = v_1.Args[1] + neg := v_1.Args[0] + if neg.Op != OpNeg32F || x != neg.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst32F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess32F, typ.Bool) + v0.AddArg2(y, neg) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq32F x x) (Less32F y:(Const32F [c]) neg:(Neg32F x))) + // result: (Not (Leq32F neg y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq32F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess32F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst32F { + continue + } + neg := v_1.Args[1] + if neg.Op != OpNeg32F || x != neg.Args[0] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq32F, typ.Bool) + v0.AddArg2(neg, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq32F x x) (Leq32F y:(Const32F [c]) neg:(Neg32F x))) + // result: (Not (Less32F neg y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq32F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq32F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst32F { + continue + } + neg := v_1.Args[1] + if neg.Op != OpNeg32F || x != neg.Args[0] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess32F, typ.Bool) + v0.AddArg2(neg, y) + v.AddArg(v0) + return true + } + break + } return false } func rewriteValuegeneric_OpPhi(v *Value) bool { @@ -27823,6 +26348,27 @@ func rewriteValuegeneric_OpRsh16Ux16(v *Value) bool { v.AuxInt = int16ToAuxInt(0) return true } + // match: (Rsh16Ux16 [false] x con:(Const16 [c])) + // cond: 0 < c && c < 16 + // result: (Rsh16Ux16 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst16 { + break + } + c := auxIntToInt16(con.AuxInt) + if !(0 < c && c < 16) { + break + } + v.reset(OpRsh16Ux16) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh16Ux32(v *Value) bool { @@ -27854,6 +26400,27 @@ func rewriteValuegeneric_OpRsh16Ux32(v *Value) bool { v.AuxInt = int16ToAuxInt(0) return true } + // match: (Rsh16Ux32 [false] x con:(Const32 [c])) + // cond: 0 < c && c < 16 + // result: (Rsh16Ux32 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst32 { + break + } + c := auxIntToInt32(con.AuxInt) + if !(0 < c && c < 16) { + break + } + v.reset(OpRsh16Ux32) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh16Ux64(v *Value) bool { @@ -28038,6 +26605,27 @@ func rewriteValuegeneric_OpRsh16Ux64(v *Value) bool { v.AddArg(v0) return true } + // match: (Rsh16Ux64 [false] x con:(Const64 [c])) + // cond: 0 < c && c < 16 + // result: (Rsh16Ux64 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst64 { + break + } + c := auxIntToInt64(con.AuxInt) + if !(0 < c && c < 16) { + break + } + v.reset(OpRsh16Ux64) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh16Ux8(v *Value) bool { @@ -28069,6 +26657,27 @@ func rewriteValuegeneric_OpRsh16Ux8(v *Value) bool { v.AuxInt = int16ToAuxInt(0) 
return true } + // match: (Rsh16Ux8 [false] x con:(Const8 [c])) + // cond: 0 < c && c < 16 + // result: (Rsh16Ux8 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst8 { + break + } + c := auxIntToInt8(con.AuxInt) + if !(0 < c && c < 16) { + break + } + v.reset(OpRsh16Ux8) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh16x16(v *Value) bool { @@ -28100,6 +26709,27 @@ func rewriteValuegeneric_OpRsh16x16(v *Value) bool { v.AuxInt = int16ToAuxInt(0) return true } + // match: (Rsh16x16 [false] x con:(Const16 [c])) + // cond: 0 < c && c < 16 + // result: (Rsh16x16 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst16 { + break + } + c := auxIntToInt16(con.AuxInt) + if !(0 < c && c < 16) { + break + } + v.reset(OpRsh16x16) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh16x32(v *Value) bool { @@ -28131,6 +26761,27 @@ func rewriteValuegeneric_OpRsh16x32(v *Value) bool { v.AuxInt = int16ToAuxInt(0) return true } + // match: (Rsh16x32 [false] x con:(Const32 [c])) + // cond: 0 < c && c < 16 + // result: (Rsh16x32 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst32 { + break + } + c := auxIntToInt32(con.AuxInt) + if !(0 < c && c < 16) { + break + } + v.reset(OpRsh16x32) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh16x64(v *Value) bool { @@ -28219,6 +26870,27 @@ func rewriteValuegeneric_OpRsh16x64(v *Value) bool { v.AddArg(v0) return true } + // match: (Rsh16x64 [false] x con:(Const64 [c])) + // cond: 0 < c && c < 16 + // result: (Rsh16x64 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst64 { + break + } + c := auxIntToInt64(con.AuxInt) + if !(0 < c && c < 16) { + break + } + v.reset(OpRsh16x64) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh16x8(v *Value) bool { @@ -28250,6 +26922,27 @@ func rewriteValuegeneric_OpRsh16x8(v *Value) bool { v.AuxInt = int16ToAuxInt(0) return true } + // match: (Rsh16x8 [false] x con:(Const8 [c])) + // cond: 0 < c && c < 16 + // result: (Rsh16x8 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst8 { + break + } + c := auxIntToInt8(con.AuxInt) + if !(0 < c && c < 16) { + break + } + v.reset(OpRsh16x8) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh32Ux16(v *Value) bool { @@ -28281,6 +26974,27 @@ func rewriteValuegeneric_OpRsh32Ux16(v *Value) bool { v.AuxInt = int32ToAuxInt(0) return true } + // match: (Rsh32Ux16 [false] x con:(Const16 [c])) + // cond: 0 < c && c < 32 + // result: (Rsh32Ux16 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst16 { + break + } + c := auxIntToInt16(con.AuxInt) + if !(0 < c && c < 32) { + break + } + v.reset(OpRsh32Ux16) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh32Ux32(v *Value) bool { @@ -28312,6 +27026,27 @@ func rewriteValuegeneric_OpRsh32Ux32(v *Value) bool { v.AuxInt = int32ToAuxInt(0) return true } + // match: (Rsh32Ux32 [false] 
x con:(Const32 [c])) + // cond: 0 < c && c < 32 + // result: (Rsh32Ux32 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst32 { + break + } + c := auxIntToInt32(con.AuxInt) + if !(0 < c && c < 32) { + break + } + v.reset(OpRsh32Ux32) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh32Ux64(v *Value) bool { @@ -28514,6 +27249,27 @@ func rewriteValuegeneric_OpRsh32Ux64(v *Value) bool { v.AddArg(v0) return true } + // match: (Rsh32Ux64 [false] x con:(Const64 [c])) + // cond: 0 < c && c < 32 + // result: (Rsh32Ux64 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst64 { + break + } + c := auxIntToInt64(con.AuxInt) + if !(0 < c && c < 32) { + break + } + v.reset(OpRsh32Ux64) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh32Ux8(v *Value) bool { @@ -28545,6 +27301,27 @@ func rewriteValuegeneric_OpRsh32Ux8(v *Value) bool { v.AuxInt = int32ToAuxInt(0) return true } + // match: (Rsh32Ux8 [false] x con:(Const8 [c])) + // cond: 0 < c && c < 32 + // result: (Rsh32Ux8 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst8 { + break + } + c := auxIntToInt8(con.AuxInt) + if !(0 < c && c < 32) { + break + } + v.reset(OpRsh32Ux8) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh32x16(v *Value) bool { @@ -28576,6 +27353,27 @@ func rewriteValuegeneric_OpRsh32x16(v *Value) bool { v.AuxInt = int32ToAuxInt(0) return true } + // match: (Rsh32x16 [false] x con:(Const16 [c])) + // cond: 0 < c && c < 32 + // result: (Rsh32x16 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst16 { + break + } + c := auxIntToInt16(con.AuxInt) + if !(0 < c && c < 32) { + break + } + v.reset(OpRsh32x16) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh32x32(v *Value) bool { @@ -28607,6 +27405,27 @@ func rewriteValuegeneric_OpRsh32x32(v *Value) bool { v.AuxInt = int32ToAuxInt(0) return true } + // match: (Rsh32x32 [false] x con:(Const32 [c])) + // cond: 0 < c && c < 32 + // result: (Rsh32x32 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst32 { + break + } + c := auxIntToInt32(con.AuxInt) + if !(0 < c && c < 32) { + break + } + v.reset(OpRsh32x32) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh32x64(v *Value) bool { @@ -28713,6 +27532,27 @@ func rewriteValuegeneric_OpRsh32x64(v *Value) bool { v.AddArg(v0) return true } + // match: (Rsh32x64 [false] x con:(Const64 [c])) + // cond: 0 < c && c < 32 + // result: (Rsh32x64 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst64 { + break + } + c := auxIntToInt64(con.AuxInt) + if !(0 < c && c < 32) { + break + } + v.reset(OpRsh32x64) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh32x8(v *Value) bool { @@ -28744,6 +27584,27 @@ func rewriteValuegeneric_OpRsh32x8(v *Value) bool { v.AuxInt = int32ToAuxInt(0) return true } + // match: (Rsh32x8 [false] x con:(Const8 [c])) + // cond: 0 < c && c < 32 + // 
result: (Rsh32x8 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst8 { + break + } + c := auxIntToInt8(con.AuxInt) + if !(0 < c && c < 32) { + break + } + v.reset(OpRsh32x8) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh64Ux16(v *Value) bool { @@ -28775,6 +27636,27 @@ func rewriteValuegeneric_OpRsh64Ux16(v *Value) bool { v.AuxInt = int64ToAuxInt(0) return true } + // match: (Rsh64Ux16 [false] x con:(Const16 [c])) + // cond: 0 < c && c < 64 + // result: (Rsh64Ux16 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst16 { + break + } + c := auxIntToInt16(con.AuxInt) + if !(0 < c && c < 64) { + break + } + v.reset(OpRsh64Ux16) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh64Ux32(v *Value) bool { @@ -28806,6 +27688,27 @@ func rewriteValuegeneric_OpRsh64Ux32(v *Value) bool { v.AuxInt = int64ToAuxInt(0) return true } + // match: (Rsh64Ux32 [false] x con:(Const32 [c])) + // cond: 0 < c && c < 64 + // result: (Rsh64Ux32 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst32 { + break + } + c := auxIntToInt32(con.AuxInt) + if !(0 < c && c < 64) { + break + } + v.reset(OpRsh64Ux32) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh64Ux64(v *Value) bool { @@ -29026,6 +27929,27 @@ func rewriteValuegeneric_OpRsh64Ux64(v *Value) bool { v.AddArg(v0) return true } + // match: (Rsh64Ux64 [false] x con:(Const64 [c])) + // cond: 0 < c && c < 64 + // result: (Rsh64Ux64 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst64 { + break + } + c := auxIntToInt64(con.AuxInt) + if !(0 < c && c < 64) { + break + } + v.reset(OpRsh64Ux64) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh64Ux8(v *Value) bool { @@ -29057,6 +27981,27 @@ func rewriteValuegeneric_OpRsh64Ux8(v *Value) bool { v.AuxInt = int64ToAuxInt(0) return true } + // match: (Rsh64Ux8 [false] x con:(Const8 [c])) + // cond: 0 < c && c < 64 + // result: (Rsh64Ux8 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst8 { + break + } + c := auxIntToInt8(con.AuxInt) + if !(0 < c && c < 64) { + break + } + v.reset(OpRsh64Ux8) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh64x16(v *Value) bool { @@ -29088,6 +28033,27 @@ func rewriteValuegeneric_OpRsh64x16(v *Value) bool { v.AuxInt = int64ToAuxInt(0) return true } + // match: (Rsh64x16 [false] x con:(Const16 [c])) + // cond: 0 < c && c < 64 + // result: (Rsh64x16 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst16 { + break + } + c := auxIntToInt16(con.AuxInt) + if !(0 < c && c < 64) { + break + } + v.reset(OpRsh64x16) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh64x32(v *Value) bool { @@ -29119,6 +28085,27 @@ func rewriteValuegeneric_OpRsh64x32(v *Value) bool { v.AuxInt = int64ToAuxInt(0) return true } + // match: (Rsh64x32 [false] x con:(Const32 [c])) + // cond: 0 < c && c < 64 + // result: (Rsh64x32 [true] x con) + 
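The long run of right-shift rules added through here shares one shape: when the shift amount is a constant strictly between 0 and the operand width, flip the op's boolean AuxInt from false to true, recording that the shift is known to be in bounds so later lowering can drop the oversized-shift guard that Go's shift semantics otherwise require. A simplified sketch of that shape; the value type, ops, and helpers below are illustrative stand-ins, not the real ssa package API:

package main

import "fmt"

type op int

const (
	opRsh64Ux64 op = iota
	opConst64
)

type value struct {
	op   op
	aux  int64 // for const ops: the constant; for shifts: 0/1 bounded flag
	args []*value
}

// markBoundedShift mirrors the generated rule:
// (Rsh64Ux64 [false] x (Const64 [c])) && 0 < c && c < 64
//   => (Rsh64Ux64 [true] x (Const64 [c]))
func markBoundedShift(v *value) bool {
	if v.op != opRsh64Ux64 || v.aux != 0 {
		return false
	}
	shift := v.args[1]
	if shift.op != opConst64 {
		return false
	}
	if c := shift.aux; 0 < c && c < 64 {
		v.aux = 1 // bounded: the backend may omit the >=64 guard
		return true
	}
	return false
}

func main() {
	x := &value{op: opConst64, aux: 42}
	c := &value{op: opConst64, aux: 3}
	v := &value{op: opRsh64Ux64, args: []*value{x, c}}
	fmt.Println(markBoundedShift(v), v.aux == 1) // true true
}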
for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst32 { + break + } + c := auxIntToInt32(con.AuxInt) + if !(0 < c && c < 64) { + break + } + v.reset(OpRsh64x32) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh64x64(v *Value) bool { @@ -29243,6 +28230,27 @@ func rewriteValuegeneric_OpRsh64x64(v *Value) bool { v.AddArg(v0) return true } + // match: (Rsh64x64 [false] x con:(Const64 [c])) + // cond: 0 < c && c < 64 + // result: (Rsh64x64 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst64 { + break + } + c := auxIntToInt64(con.AuxInt) + if !(0 < c && c < 64) { + break + } + v.reset(OpRsh64x64) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh64x8(v *Value) bool { @@ -29274,6 +28282,27 @@ func rewriteValuegeneric_OpRsh64x8(v *Value) bool { v.AuxInt = int64ToAuxInt(0) return true } + // match: (Rsh64x8 [false] x con:(Const8 [c])) + // cond: 0 < c && c < 64 + // result: (Rsh64x8 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst8 { + break + } + c := auxIntToInt8(con.AuxInt) + if !(0 < c && c < 64) { + break + } + v.reset(OpRsh64x8) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh8Ux16(v *Value) bool { @@ -29305,6 +28334,27 @@ func rewriteValuegeneric_OpRsh8Ux16(v *Value) bool { v.AuxInt = int8ToAuxInt(0) return true } + // match: (Rsh8Ux16 [false] x con:(Const16 [c])) + // cond: 0 < c && c < 8 + // result: (Rsh8Ux16 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst16 { + break + } + c := auxIntToInt16(con.AuxInt) + if !(0 < c && c < 8) { + break + } + v.reset(OpRsh8Ux16) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh8Ux32(v *Value) bool { @@ -29336,6 +28386,27 @@ func rewriteValuegeneric_OpRsh8Ux32(v *Value) bool { v.AuxInt = int8ToAuxInt(0) return true } + // match: (Rsh8Ux32 [false] x con:(Const32 [c])) + // cond: 0 < c && c < 8 + // result: (Rsh8Ux32 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst32 { + break + } + c := auxIntToInt32(con.AuxInt) + if !(0 < c && c < 8) { + break + } + v.reset(OpRsh8Ux32) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh8Ux64(v *Value) bool { @@ -29502,6 +28573,27 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value) bool { v.AddArg2(x, v0) return true } + // match: (Rsh8Ux64 [false] x con:(Const64 [c])) + // cond: 0 < c && c < 8 + // result: (Rsh8Ux64 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst64 { + break + } + c := auxIntToInt64(con.AuxInt) + if !(0 < c && c < 8) { + break + } + v.reset(OpRsh8Ux64) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh8Ux8(v *Value) bool { @@ -29533,6 +28625,27 @@ func rewriteValuegeneric_OpRsh8Ux8(v *Value) bool { v.AuxInt = int8ToAuxInt(0) return true } + // match: (Rsh8Ux8 [false] x con:(Const8 [c])) + // cond: 0 < c && c < 8 + // result: (Rsh8Ux8 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con 
:= v_1 + if con.Op != OpConst8 { + break + } + c := auxIntToInt8(con.AuxInt) + if !(0 < c && c < 8) { + break + } + v.reset(OpRsh8Ux8) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh8x16(v *Value) bool { @@ -29564,6 +28677,27 @@ func rewriteValuegeneric_OpRsh8x16(v *Value) bool { v.AuxInt = int8ToAuxInt(0) return true } + // match: (Rsh8x16 [false] x con:(Const16 [c])) + // cond: 0 < c && c < 8 + // result: (Rsh8x16 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst16 { + break + } + c := auxIntToInt16(con.AuxInt) + if !(0 < c && c < 8) { + break + } + v.reset(OpRsh8x16) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh8x32(v *Value) bool { @@ -29595,6 +28729,27 @@ func rewriteValuegeneric_OpRsh8x32(v *Value) bool { v.AuxInt = int8ToAuxInt(0) return true } + // match: (Rsh8x32 [false] x con:(Const32 [c])) + // cond: 0 < c && c < 8 + // result: (Rsh8x32 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst32 { + break + } + c := auxIntToInt32(con.AuxInt) + if !(0 < c && c < 8) { + break + } + v.reset(OpRsh8x32) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh8x64(v *Value) bool { @@ -29664,6 +28819,27 @@ func rewriteValuegeneric_OpRsh8x64(v *Value) bool { v.AddArg2(x, v0) return true } + // match: (Rsh8x64 [false] x con:(Const64 [c])) + // cond: 0 < c && c < 8 + // result: (Rsh8x64 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst64 { + break + } + c := auxIntToInt64(con.AuxInt) + if !(0 < c && c < 8) { + break + } + v.reset(OpRsh8x64) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpRsh8x8(v *Value) bool { @@ -29695,10 +28871,50 @@ func rewriteValuegeneric_OpRsh8x8(v *Value) bool { v.AuxInt = int8ToAuxInt(0) return true } + // match: (Rsh8x8 [false] x con:(Const8 [c])) + // cond: 0 < c && c < 8 + // result: (Rsh8x8 [true] x con) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + con := v_1 + if con.Op != OpConst8 { + break + } + c := auxIntToInt8(con.AuxInt) + if !(0 < c && c < 8) { + break + } + v.reset(OpRsh8x8) + v.AuxInt = boolToAuxInt(true) + v.AddArg2(x, con) + return true + } return false } func rewriteValuegeneric_OpSelect0(v *Value) bool { v_0 := v.Args[0] + // match: (Select0 a:(Add64carry x y (Const64 [0]))) + // cond: a.Uses == 1 + // result: (Add64 x y) + for { + a := v_0 + if a.Op != OpAdd64carry { + break + } + _ = a.Args[2] + x := a.Args[0] + y := a.Args[1] + a_2 := a.Args[2] + if a_2.Op != OpConst64 || auxIntToInt64(a_2.AuxInt) != 0 || !(a.Uses == 1) { + break + } + v.reset(OpAdd64) + v.AddArg2(x, y) + return true + } // match: (Select0 (MakeTuple x y)) // result: x for { @@ -31460,7 +30676,7 @@ func rewriteValuegeneric_OpStaticLECall(v *Value) bool { return true } // match: (StaticLECall {f} typ_ x y mem) - // cond: isSameCall(f, "runtime.efaceeq") && isDirectType(typ_) && clobber(v) + // cond: isSameCall(f, "runtime.efaceeq") && isDirectAndComparableType(typ_) && clobber(v) // result: (MakeResult (EqPtr x y) mem) for { if len(v.Args) != 4 { @@ -31471,7 +30687,7 @@ func rewriteValuegeneric_OpStaticLECall(v *Value) bool { typ_ := v.Args[0] x := v.Args[1] y := v.Args[2] - if !(isSameCall(f, 
"runtime.efaceeq") && isDirectType(typ_) && clobber(v)) { + if !(isSameCall(f, "runtime.efaceeq") && isDirectAndComparableType(typ_) && clobber(v)) { break } v.reset(OpMakeResult) @@ -31481,7 +30697,7 @@ func rewriteValuegeneric_OpStaticLECall(v *Value) bool { return true } // match: (StaticLECall {f} itab x y mem) - // cond: isSameCall(f, "runtime.ifaceeq") && isDirectIface(itab) && clobber(v) + // cond: isSameCall(f, "runtime.ifaceeq") && isDirectAndComparableIface(itab) && clobber(v) // result: (MakeResult (EqPtr x y) mem) for { if len(v.Args) != 4 { @@ -31492,7 +30708,7 @@ func rewriteValuegeneric_OpStaticLECall(v *Value) bool { itab := v.Args[0] x := v.Args[1] y := v.Args[2] - if !(isSameCall(f, "runtime.ifaceeq") && isDirectIface(itab) && clobber(v)) { + if !(isSameCall(f, "runtime.ifaceeq") && isDirectAndComparableIface(itab) && clobber(v)) { break } v.reset(OpMakeResult) @@ -31910,27 +31126,27 @@ func rewriteValuegeneric_OpStore(v *Value) bool { v.AddArg3(dst, e, mem) return true } - // match: (Store (SelectN [0] call:(StaticLECall _ _)) x mem:(SelectN [1] call)) - // cond: isConstZero(x) && isSameCall(call.Aux, "runtime.newobject") + // match: (Store (SelectN [0] call:(StaticLECall ___)) x mem:(SelectN [1] call)) + // cond: isConstZero(x) && isMalloc(call.Aux) // result: mem for { if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 { break } call := v_0.Args[0] - if call.Op != OpStaticLECall || len(call.Args) != 2 { + if call.Op != OpStaticLECall { break } x := v_1 mem := v_2 - if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")) { + if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isMalloc(call.Aux)) { break } v.copyOf(mem) return true } - // match: (Store (OffPtr (SelectN [0] call:(StaticLECall _ _))) x mem:(SelectN [1] call)) - // cond: isConstZero(x) && isSameCall(call.Aux, "runtime.newobject") + // match: (Store (OffPtr (SelectN [0] call:(StaticLECall ___))) x mem:(SelectN [1] call)) + // cond: isConstZero(x) && isMalloc(call.Aux) // result: mem for { if v_0.Op != OpOffPtr { @@ -31941,12 +31157,12 @@ func rewriteValuegeneric_OpStore(v *Value) bool { break } call := v_0_0.Args[0] - if call.Op != OpStaticLECall || len(call.Args) != 2 { + if call.Op != OpStaticLECall { break } x := v_1 mem := v_2 - if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")) { + if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isMalloc(call.Aux)) { break } v.copyOf(mem) @@ -36289,19 +35505,19 @@ func rewriteValuegeneric_OpZero(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call)) - // cond: isSameCall(call.Aux, "runtime.newobject") + // match: (Zero (SelectN [0] call:(StaticLECall ___)) mem:(SelectN [1] call)) + // cond: isMalloc(call.Aux) // result: mem for { if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 { break } call := v_0.Args[0] - if call.Op != OpStaticLECall || len(call.Args) != 2 { + if call.Op != OpStaticLECall { break } mem := v_1 - if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isSameCall(call.Aux, "runtime.newobject")) { + if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isMalloc(call.Aux)) { break } v.copyOf(mem) diff --git 
a/src/cmd/compile/internal/ssa/sccp.go b/src/cmd/compile/internal/ssa/sccp.go index ecc0f94e5b9..9b958d04547 100644 --- a/src/cmd/compile/internal/ssa/sccp.go +++ b/src/cmd/compile/internal/ssa/sccp.go @@ -4,10 +4,6 @@ package ssa -import ( - "fmt" -) - // ---------------------------------------------------------------------------- // Sparse Conditional Constant Propagation // @@ -118,7 +114,7 @@ func sccp(f *Func) { constCnt, rewireCnt := t.replaceConst() if f.pass.debug > 0 { if constCnt > 0 || rewireCnt > 0 { - fmt.Printf("Phase SCCP for %v : %v constants, %v dce\n", f.Name, constCnt, rewireCnt) + f.Warnl(f.Entry.Pos, "Phase SCCP for %v : %v constants, %v dce", f.Name, constCnt, rewireCnt) } } } @@ -377,7 +373,7 @@ func (t *worklist) visitValue(val *Value) { // re-visit all uses of value if its lattice is changed newLt := t.getLatticeCell(val) if !equals(newLt, oldLt) { - if int8(oldLt.tag) > int8(newLt.tag) { + if oldLt.tag > newLt.tag { t.f.Fatalf("Must lower lattice\n") } t.addUses(val) @@ -563,7 +559,7 @@ func (t *worklist) replaceConst() (int, int) { if lt.tag == constant { if !isConst(val) { if t.f.pass.debug > 0 { - fmt.Printf("Replace %v with %v\n", val.LongString(), lt.val.LongString()) + t.f.Warnl(val.Pos, "Replace %v with %v", val.LongString(), lt.val.LongString()) } val.reset(lt.val.Op) val.AuxInt = lt.val.AuxInt @@ -575,7 +571,7 @@ func (t *worklist) replaceConst() (int, int) { if rewireSuccessor(block, lt.val) { rewireCnt++ if t.f.pass.debug > 0 { - fmt.Printf("Rewire %v %v successors\n", block.Kind, block) + t.f.Warnl(block.Pos, "Rewire %v %v successors", block.Kind, block) } } } diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go index 325118a1827..80062531453 100644 --- a/src/cmd/compile/internal/ssa/schedule.go +++ b/src/cmd/compile/internal/ssa/schedule.go @@ -36,13 +36,13 @@ type ValHeap struct { func (h ValHeap) Len() int { return len(h.a) } func (h ValHeap) Swap(i, j int) { a := h.a; a[i], a[j] = a[j], a[i] } -func (h *ValHeap) Push(x interface{}) { +func (h *ValHeap) Push(x any) { // Push and Pop use pointer receivers because they modify the slice's length, // not just its contents. v := x.(*Value) h.a = append(h.a, v) } -func (h *ValHeap) Pop() interface{} { +func (h *ValHeap) Pop() any { old := h.a n := len(old) x := old[n-1] diff --git a/src/cmd/compile/internal/ssa/sizeof_test.go b/src/cmd/compile/internal/ssa/sizeof_test.go index 9a58197925c..c81b66f76a6 100644 --- a/src/cmd/compile/internal/ssa/sizeof_test.go +++ b/src/cmd/compile/internal/ssa/sizeof_test.go @@ -16,9 +16,9 @@ func TestSizeof(t *testing.T) { const _64bit = unsafe.Sizeof(uintptr(0)) == 8 var tests = []struct { - val interface{} // type as a value - _32bit uintptr // size on 32bit platforms - _64bit uintptr // size on 64bit platforms + val any // type as a value + _32bit uintptr // size on 32bit platforms + _64bit uintptr // size on 64bit platforms }{ {Value{}, 72, 112}, {Block{}, 168, 312}, diff --git a/src/cmd/compile/internal/ssa/softfloat.go b/src/cmd/compile/internal/ssa/softfloat.go index 351f824a9f5..cd6df5fe8b0 100644 --- a/src/cmd/compile/internal/ssa/softfloat.go +++ b/src/cmd/compile/internal/ssa/softfloat.go @@ -73,7 +73,7 @@ func softfloat(f *Func) { if newInt64 && f.Config.RegSize == 4 { // On 32bit arch, decompose Uint64 introduced in the switch above. 
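The sccp.go hunk above keeps the assertion that a value's lattice cell never rises: cells may only move down (top, then constant, then bottom), which is what guarantees the propagation loop terminates. A tag-level sketch of that invariant, assuming the conventional three-level SCCP lattice (the real cells also carry the constant's value, which this sketch omits):

package main

import "fmt"

type tag int8

const (
	top      tag = iota // no information yet (optimistic)
	constant            // proven to be one specific constant
	bottom              // proven variable (over-defined)
)

// meet combines two facts about the same value; the result never rises.
func meet(a, b tag) tag {
	if a > b {
		return a
	}
	return b
}

func main() {
	lt := top
	for _, observed := range []tag{constant, constant, bottom} {
		next := meet(lt, observed)
		if next < lt {
			panic("must lower lattice") // mirrors the Fatalf in visitValue
		}
		lt = next
	}
	fmt.Println("final:", lt) // bottom: the value is not a constant
}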
- decomposeBuiltIn(f) + decomposeBuiltin(f) applyRewrite(f, rewriteBlockdec64, rewriteValuedec64, removeDeadValues) } diff --git a/src/cmd/compile/internal/ssa/stackalloc.go b/src/cmd/compile/internal/ssa/stackalloc.go index 11ffe5b55ea..06097e95da0 100644 --- a/src/cmd/compile/internal/ssa/stackalloc.go +++ b/src/cmd/compile/internal/ssa/stackalloc.go @@ -56,10 +56,35 @@ func putStackAllocState(s *stackAllocState) { } type stackValState struct { - typ *types.Type - spill *Value - needSlot bool - isArg bool + typ *types.Type + spill *Value + needSlot bool + isArg bool + defBlock ID + useBlocks []stackUseBlock +} + +// addUseBlock adds a block to the set of blocks that uses this value. +// Note that we only loosely enforce the set property by checking the last +// block that was appended to the list and duplicates may occur. +// Because we add values block by block (barring phi-nodes), the number of duplicates is +// small and we deduplicate as part of the liveness algorithm later anyway. +func (sv *stackValState) addUseBlock(b *Block, liveout bool) { + entry := stackUseBlock{ + b: b, + liveout: liveout, + } + if sv.useBlocks == nil || sv.useBlocks[len(sv.useBlocks)-1] != entry { + sv.useBlocks = append(sv.useBlocks, stackUseBlock{ + b: b, + liveout: liveout, + }) + } +} + +type stackUseBlock struct { + b *Block + liveout bool } // stackalloc allocates storage in the stack frame for @@ -99,6 +124,7 @@ func (s *stackAllocState) init(f *Func, spillLive [][]ID) { s.values[v.ID].typ = v.Type s.values[v.ID].needSlot = !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && f.getHome(v.ID) == nil && !v.rematerializeable() && !v.OnWasmStack s.values[v.ID].isArg = hasAnyArgOp(v) + s.values[v.ID].defBlock = b.ID if f.pass.debug > stackDebug && s.values[v.ID].needSlot { fmt.Printf("%s needs a stack slot\n", v) } @@ -291,80 +317,89 @@ func (s *stackAllocState) stackalloc() { // computeLive computes a map from block ID to a list of // stack-slot-needing value IDs live at the end of that block. -// TODO: this could be quadratic if lots of variables are live across lots of -// basic blocks. Figure out a way to make this function (or, more precisely, the user -// of this function) require only linear size & time. func (s *stackAllocState) computeLive(spillLive [][]ID) { - s.live = make([][]ID, s.f.NumBlocks()) - var phis []*Value - live := s.f.newSparseSet(s.f.NumValues()) - defer s.f.retSparseSet(live) - t := s.f.newSparseSet(s.f.NumValues()) - defer s.f.retSparseSet(t) - // Instead of iterating over f.Blocks, iterate over their postordering. - // Liveness information flows backward, so starting at the end - // increases the probability that we will stabilize quickly. - po := s.f.postorder() - for { - changed := false - for _, b := range po { - // Start with known live values at the end of the block - live.clear() - live.addAll(s.live[b.ID]) - - // Propagate backwards to the start of the block - phis = phis[:0] - for i := len(b.Values) - 1; i >= 0; i-- { - v := b.Values[i] - live.remove(v.ID) - if v.Op == OpPhi { - // Save phi for later. - // Note: its args might need a stack slot even though - // the phi itself doesn't. So don't use needSlot. 
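addUseBlock above enforces the set property only loosely: it checks just the last entry appended, which catches most duplicates because a value's uses within a block are visited together, and the liveness pass deduplicates fully later anyway. The same last-element trick in isolation (int IDs stand in for the ssa types):

package main

import "fmt"

// appendDedupLast appends id unless it equals the most recent entry.
// It enforces the set property only loosely: non-adjacent duplicates
// can still occur and must be tolerated (or cleaned up) downstream.
func appendDedupLast(s []int, id int) []int {
	if len(s) == 0 || s[len(s)-1] != id {
		s = append(s, id)
	}
	return s
}

func main() {
	var uses []int
	for _, id := range []int{7, 7, 7, 3, 3, 7} {
		uses = appendDedupLast(uses, id)
	}
	fmt.Println(uses) // [7 3 7]: adjacent runs collapsed, one stale duplicate left
}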
- if !v.Type.IsMemory() && !v.Type.IsVoid() { - phis = append(phis, v) - } - continue - } - for _, a := range v.Args { - if s.values[a.ID].needSlot { - live.add(a.ID) - } - } - } - - // for each predecessor of b, expand its list of live-at-end values - // invariant: s contains the values live at the start of b (excluding phi inputs) - for i, e := range b.Preds { - p := e.b - t.clear() - t.addAll(s.live[p.ID]) - t.addAll(live.contents()) - t.addAll(spillLive[p.ID]) - for _, v := range phis { - a := v.Args[i] - if s.values[a.ID].needSlot { - t.add(a.ID) - } - if spill := s.values[a.ID].spill; spill != nil { - //TODO: remove? Subsumed by SpillUse? - t.add(spill.ID) - } - } - if t.size() == len(s.live[p.ID]) { - continue - } - // grow p's live set - s.live[p.ID] = append(s.live[p.ID][:0], t.contents()...) - changed = true - } + // Because values using stack slots are few and far between + // (compared to the set of all values), we use a path exploration + // algorithm to calculate liveness here. + f := s.f + for _, b := range f.Blocks { + for _, spillvid := range spillLive[b.ID] { + val := &s.values[spillvid] + val.addUseBlock(b, true) } - - if !changed { - break + for _, v := range b.Values { + for i, a := range v.Args { + val := &s.values[a.ID] + useBlock := b + forceLiveout := false + if v.Op == OpPhi { + useBlock = b.Preds[i].b + forceLiveout = true + if spill := val.spill; spill != nil { + //TODO: remove? Subsumed by SpillUse? + s.values[spill.ID].addUseBlock(useBlock, true) + } + } + if !val.needSlot { + continue + } + val.addUseBlock(useBlock, forceLiveout) + } } } + + s.live = make([][]ID, f.NumBlocks()) + push := func(bid, vid ID) { + l := s.live[bid] + if l == nil || l[len(l)-1] != vid { + l = append(l, vid) + s.live[bid] = l + } + } + // TODO: If we can help along the interference graph by calculating livein sets, + // we can do so trivially by turning this sparse set into an array of arrays + // and checking the top for the current value instead of inclusion in the sparse set. + seen := f.newSparseSet(f.NumBlocks()) + defer f.retSparseSet(seen) + // Instead of pruning out duplicate blocks when we build the useBlocks slices + // or when we add them to the queue, rely on the seen set to stop considering + // them. This is slightly faster than building the work queues as sets. + // + // However, this means that the queue can grow larger than the number of blocks, + // usually in very short functions. Returning a slice with values appended beyond the + // original allocation can corrupt the allocator state, so cap the queue and return + // the originally allocated slice regardless. 
+ allocedBqueue := f.Cache.allocBlockSlice(f.NumBlocks()) + defer f.Cache.freeBlockSlice(allocedBqueue) + bqueue := allocedBqueue[:0:f.NumBlocks()] + + for vid, v := range s.values { + if !v.needSlot { + continue + } + seen.clear() + bqueue = bqueue[:0] + for _, b := range v.useBlocks { + if b.liveout { + push(b.b.ID, ID(vid)) + } + bqueue = append(bqueue, b.b) + } + for len(bqueue) > 0 { + work := bqueue[len(bqueue)-1] + bqueue = bqueue[:len(bqueue)-1] + if seen.contains(work.ID) || work.ID == v.defBlock { + continue + } + seen.add(work.ID) + for _, e := range work.Preds { + push(e.b.ID, ID(vid)) + bqueue = append(bqueue, e.b) + } + } + } + if s.f.pass.debug > stackDebug { for _, b := range s.f.Blocks { fmt.Printf("stacklive %s %v\n", b, s.live[b.ID]) diff --git a/src/cmd/compile/internal/ssa/stmtlines_test.go b/src/cmd/compile/internal/ssa/stmtlines_test.go index 8cd11e9828e..2bdd6c80b2d 100644 --- a/src/cmd/compile/internal/ssa/stmtlines_test.go +++ b/src/cmd/compile/internal/ssa/stmtlines_test.go @@ -137,17 +137,18 @@ func TestStmtLines(t *testing.T) { } } - var m int - if runtime.GOARCH == "amd64" { - m = 1 // > 99% obtained on amd64, no backsliding - } else if runtime.GOARCH == "riscv64" { - m = 3 // XXX temporary update threshold to 97% for regabi - } else { - m = 2 // expect 98% elsewhere. + var m float64 + switch runtime.GOARCH { + case "amd64": + m = 0.0111 // > 98.89% obtained on amd64, no backsliding + case "riscv64": + m = 0.03 // XXX temporary update threshold to 97% for regabi + default: + m = 0.02 // expect 98% elsewhere. } - if len(nonStmtLines)*100 > m*len(lines) { - t.Errorf("Saw too many (%s, > %d%%) lines without statement marks, total=%d, nostmt=%d ('-run TestStmtLines -v' lists failing lines)\n", runtime.GOARCH, m, len(lines), len(nonStmtLines)) + if float64(len(nonStmtLines)) > m*float64(len(lines)) { + t.Errorf("Saw too many (%s, > %.1f%%) lines without statement marks, total=%d, nostmt=%d ('-run TestStmtLines -v' lists failing lines)\n", runtime.GOARCH, m*100, len(lines), len(nonStmtLines)) } t.Logf("Saw %d out of %d lines without statement marks", len(nonStmtLines), len(lines)) if testing.Verbose() { diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go index 3b9cadf6f18..5f604099010 100644 --- a/src/cmd/compile/internal/ssa/value.go +++ b/src/cmd/compile/internal/ssa/value.go @@ -472,9 +472,9 @@ func (v *Value) copyIntoWithXPos(b *Block, pos src.XPos) *Value { return c } -func (v *Value) Logf(msg string, args ...interface{}) { v.Block.Logf(msg, args...) } -func (v *Value) Log() bool { return v.Block.Log() } -func (v *Value) Fatalf(msg string, args ...interface{}) { +func (v *Value) Logf(msg string, args ...any) { v.Block.Logf(msg, args...) } +func (v *Value) Log() bool { return v.Block.Log() } +func (v *Value) Fatalf(msg string, args ...any) { v.Block.Func.fe.Fatalf(v.Pos, msg, args...) } diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go index ec6901f13ec..ec5a0fed29d 100644 --- a/src/cmd/compile/internal/ssa/writebarrier.go +++ b/src/cmd/compile/internal/ssa/writebarrier.go @@ -798,7 +798,16 @@ func IsNewObject(v *Value, select1 []*Value) (mem *Value, ok bool) { if call.Op != OpStaticCall { return nil, false } - if !isSameCall(call.Aux, "runtime.newobject") { + // Check for new object, or for new object calls that have been transformed into size-specialized malloc calls. 
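The computeLive replacement above walks backward from each slot-needing value's use blocks toward its definition, marking the value live-out in every predecessor a path crosses; because few values need stack slots, one cheap traversal per value beats the old whole-function fixed-point iteration. A compact sketch of that traversal on a toy CFG (the block and predecessor shapes below are assumptions for illustration):

package main

import "fmt"

type block struct {
	id    int
	preds []*block
}

// liveOut computes the blocks where a value is live at the end, given its
// defining block and the blocks containing uses (phi uses are attributed
// to the corresponding predecessor, as in the real pass).
func liveOut(def *block, uses []*block) map[int]bool {
	live := map[int]bool{}
	seen := map[int]bool{}
	queue := append([]*block(nil), uses...)
	for len(queue) > 0 {
		b := queue[len(queue)-1]
		queue = queue[:len(queue)-1]
		if seen[b.id] || b == def {
			continue // stop at the definition; liveness cannot precede it
		}
		seen[b.id] = true
		for _, p := range b.preds {
			live[p.id] = true
			queue = append(queue, p)
		}
	}
	return live
}

func main() {
	b0 := &block{id: 0}
	b1 := &block{id: 1, preds: []*block{b0}}
	b2 := &block{id: 2, preds: []*block{b0}}
	b3 := &block{id: 3, preds: []*block{b1, b2}}
	fmt.Println(liveOut(b0, []*block{b3})) // map[0:true 1:true 2:true]
}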
+ // Calls that have return type unsafe pointer may have originally been produced by flushPendingHeapAllocations + // in the ssa generator, so may have not originally been newObject calls. + var numParameters int64 + switch { + case isNewObject(call.Aux): + numParameters = 1 + case isSpecializedMalloc(call.Aux) && !v.Type.IsUnsafePtr(): + numParameters = 3 + default: return nil, false } if f.ABIDefault == f.ABI1 && len(c.intParamRegs) >= 1 { @@ -813,7 +822,7 @@ func IsNewObject(v *Value, select1 []*Value) (mem *Value, ok bool) { if v.Args[0].Args[0].Op != OpSP { return nil, false } - if v.Args[0].AuxInt != c.ctxt.Arch.FixedFrameSize+c.RegSize { // offset of return value + if v.Args[0].AuxInt != c.ctxt.Arch.FixedFrameSize+numParameters*c.RegSize { // offset of return value return nil, false } return mem, true diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index b3b9314b0d1..a20529258a6 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1220,11 +1220,11 @@ func initIntrinsics(cfg *intrinsicBuildConfig) { alias("math/bits", "OnesCount", "math/bits", "OnesCount64", p8...) - addF("math/bits", "Mul64", + add("math/bits", "Mul64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1]) }, - sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64, sys.RISCV64, sys.Loong64) + all...) alias("math/bits", "Mul", "math/bits", "Mul64", p8...) alias("internal/runtime/math", "Mul64", "math/bits", "Mul64", p8...) addF("math/bits", "Add64", @@ -1604,6 +1604,36 @@ func initIntrinsics(cfg *intrinsicBuildConfig) { }, sys.AMD64) + /******** crypto/internal/constanttime ********/ + // We implement a superset of the Select promise: + // Select returns x if v != 0 and y if v == 0. + add("crypto/internal/constanttime", "Select", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v, x, y := args[0], args[1], args[2] + + var checkOp ssa.Op + var zero *ssa.Value + switch s.config.PtrSize { + case 8: + checkOp = ssa.OpNeq64 + zero = s.constInt64(types.Types[types.TINT], 0) + case 4: + checkOp = ssa.OpNeq32 + zero = s.constInt32(types.Types[types.TINT], 0) + default: + panic("unreachable") + } + check := s.newValue2(checkOp, types.Types[types.TBOOL], zero, v) + + return s.newValue3(ssa.OpCondSelect, types.Types[types.TINT], x, y, check) + }, + sys.ArchAMD64, sys.ArchARM64, sys.ArchLoong64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchWasm) // all with CMOV support. + add("crypto/internal/constanttime", "boolToUint8", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpCvtBoolToUint8, types.Types[types.TUINT8], args[0]) + }, + all...) + if buildcfg.Experiment.SIMD { // Only enable intrinsics, if SIMD experiment. 
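The new constanttime.Select intrinsic emits a CondSelect, i.e. a conditional move, on the listed architectures, so the selection takes the same time whichever arm wins. Portable Go cannot guarantee a branch-free lowering, which is the point of intrinsifying it; the sketch below only illustrates the contract (x if v != 0, y if v == 0) with a standard mask trick and is not the package's implementation:

package main

import "fmt"

// selectInt returns x if v != 0 and y if v == 0, selecting through an
// all-ones or all-zero mask instead of a data-dependent branch.
func selectInt(v, x, y int) int {
	var b int
	if v != 0 { // small enough that compilers often lower it branch-free;
		b = 1 // the intrinsic removes the guesswork by emitting CondSelect
	}
	mask := -b // b==1 -> all ones, b==0 -> all zeros
	return x&mask | y&^mask
}

func main() {
	fmt.Println(selectInt(7, 10, 20)) // 10: any nonzero v selects x
	fmt.Println(selectInt(0, 10, 20)) // 20
}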
simdIntrinsics(addF) diff --git a/src/cmd/compile/internal/ssagen/intrinsics_test.go b/src/cmd/compile/internal/ssagen/intrinsics_test.go index 7a212f1c3ae..0c483d49c33 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics_test.go +++ b/src/cmd/compile/internal/ssagen/intrinsics_test.go @@ -36,6 +36,7 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"386", "internal/runtime/sys", "TrailingZeros64"}: struct{}{}, {"386", "internal/runtime/sys", "TrailingZeros8"}: struct{}{}, {"386", "math", "sqrt"}: struct{}{}, + {"386", "math/bits", "Mul64"}: struct{}{}, {"386", "math/bits", "ReverseBytes32"}: struct{}{}, {"386", "math/bits", "ReverseBytes64"}: struct{}{}, {"386", "math/bits", "TrailingZeros16"}: struct{}{}, @@ -44,6 +45,7 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"386", "math/bits", "TrailingZeros8"}: struct{}{}, {"386", "runtime", "KeepAlive"}: struct{}{}, {"386", "runtime", "slicebytetostringtmp"}: struct{}{}, + {"386", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, {"amd64", "internal/runtime/atomic", "And"}: struct{}{}, {"amd64", "internal/runtime/atomic", "And32"}: struct{}{}, {"amd64", "internal/runtime/atomic", "And64"}: struct{}{}, @@ -190,6 +192,8 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"amd64", "sync/atomic", "SwapUint32"}: struct{}{}, {"amd64", "sync/atomic", "SwapUint64"}: struct{}{}, {"amd64", "sync/atomic", "SwapUintptr"}: struct{}{}, + {"amd64", "crypto/internal/constanttime", "Select"}: struct{}{}, + {"amd64", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, {"arm", "internal/runtime/sys", "Bswap32"}: struct{}{}, {"arm", "internal/runtime/sys", "Bswap64"}: struct{}{}, {"arm", "internal/runtime/sys", "GetCallerPC"}: struct{}{}, @@ -208,6 +212,7 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"arm", "math/bits", "Len32"}: struct{}{}, {"arm", "math/bits", "Len64"}: struct{}{}, {"arm", "math/bits", "Len8"}: struct{}{}, + {"arm", "math/bits", "Mul64"}: struct{}{}, {"arm", "math/bits", "ReverseBytes32"}: struct{}{}, {"arm", "math/bits", "ReverseBytes64"}: struct{}{}, {"arm", "math/bits", "RotateLeft32"}: struct{}{}, @@ -217,6 +222,7 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"arm", "math/bits", "TrailingZeros8"}: struct{}{}, {"arm", "runtime", "KeepAlive"}: struct{}{}, {"arm", "runtime", "slicebytetostringtmp"}: struct{}{}, + {"arm", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, {"arm64", "internal/runtime/atomic", "And"}: struct{}{}, {"arm64", "internal/runtime/atomic", "And32"}: struct{}{}, {"arm64", "internal/runtime/atomic", "And64"}: struct{}{}, @@ -361,6 +367,8 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"arm64", "sync/atomic", "SwapUint32"}: struct{}{}, {"arm64", "sync/atomic", "SwapUint64"}: struct{}{}, {"arm64", "sync/atomic", "SwapUintptr"}: struct{}{}, + {"arm64", "crypto/internal/constanttime", "Select"}: struct{}{}, + {"arm64", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, {"loong64", "internal/runtime/atomic", "And"}: struct{}{}, {"loong64", "internal/runtime/atomic", "And32"}: struct{}{}, {"loong64", "internal/runtime/atomic", "And64"}: struct{}{}, @@ -507,6 +515,8 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"loong64", "sync/atomic", "SwapUint32"}: struct{}{}, {"loong64", "sync/atomic", "SwapUint64"}: struct{}{}, {"loong64", "sync/atomic", "SwapUintptr"}: struct{}{}, + {"loong64", "crypto/internal/constanttime", "Select"}: struct{}{}, + {"loong64", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, {"mips", 
"internal/runtime/atomic", "And"}: struct{}{}, {"mips", "internal/runtime/atomic", "And8"}: struct{}{}, {"mips", "internal/runtime/atomic", "Cas"}: struct{}{}, @@ -552,6 +562,7 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"mips", "math/bits", "Len32"}: struct{}{}, {"mips", "math/bits", "Len64"}: struct{}{}, {"mips", "math/bits", "Len8"}: struct{}{}, + {"mips", "math/bits", "Mul64"}: struct{}{}, {"mips", "math/bits", "TrailingZeros16"}: struct{}{}, {"mips", "math/bits", "TrailingZeros32"}: struct{}{}, {"mips", "math/bits", "TrailingZeros64"}: struct{}{}, @@ -577,6 +588,7 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"mips", "sync/atomic", "SwapInt32"}: struct{}{}, {"mips", "sync/atomic", "SwapUint32"}: struct{}{}, {"mips", "sync/atomic", "SwapUintptr"}: struct{}{}, + {"mips", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, {"mips64", "internal/runtime/atomic", "And"}: struct{}{}, {"mips64", "internal/runtime/atomic", "And8"}: struct{}{}, {"mips64", "internal/runtime/atomic", "Cas"}: struct{}{}, @@ -665,6 +677,7 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"mips64", "sync/atomic", "SwapUint32"}: struct{}{}, {"mips64", "sync/atomic", "SwapUint64"}: struct{}{}, {"mips64", "sync/atomic", "SwapUintptr"}: struct{}{}, + {"mips64", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, {"mips64le", "internal/runtime/atomic", "And"}: struct{}{}, {"mips64le", "internal/runtime/atomic", "And8"}: struct{}{}, {"mips64le", "internal/runtime/atomic", "Cas"}: struct{}{}, @@ -753,6 +766,7 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"mips64le", "sync/atomic", "SwapUint32"}: struct{}{}, {"mips64le", "sync/atomic", "SwapUint64"}: struct{}{}, {"mips64le", "sync/atomic", "SwapUintptr"}: struct{}{}, + {"mips64le", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, {"mipsle", "internal/runtime/atomic", "And"}: struct{}{}, {"mipsle", "internal/runtime/atomic", "And8"}: struct{}{}, {"mipsle", "internal/runtime/atomic", "Cas"}: struct{}{}, @@ -798,6 +812,7 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"mipsle", "math/bits", "Len32"}: struct{}{}, {"mipsle", "math/bits", "Len64"}: struct{}{}, {"mipsle", "math/bits", "Len8"}: struct{}{}, + {"mipsle", "math/bits", "Mul64"}: struct{}{}, {"mipsle", "math/bits", "TrailingZeros16"}: struct{}{}, {"mipsle", "math/bits", "TrailingZeros32"}: struct{}{}, {"mipsle", "math/bits", "TrailingZeros64"}: struct{}{}, @@ -823,6 +838,7 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"mipsle", "sync/atomic", "SwapInt32"}: struct{}{}, {"mipsle", "sync/atomic", "SwapUint32"}: struct{}{}, {"mipsle", "sync/atomic", "SwapUintptr"}: struct{}{}, + {"mipsle", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, {"ppc64", "internal/runtime/atomic", "And"}: struct{}{}, {"ppc64", "internal/runtime/atomic", "And8"}: struct{}{}, {"ppc64", "internal/runtime/atomic", "Cas"}: struct{}{}, @@ -947,6 +963,8 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"ppc64", "sync/atomic", "SwapUint32"}: struct{}{}, {"ppc64", "sync/atomic", "SwapUint64"}: struct{}{}, {"ppc64", "sync/atomic", "SwapUintptr"}: struct{}{}, + {"ppc64", "crypto/internal/constanttime", "Select"}: struct{}{}, + {"ppc64", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, {"ppc64le", "internal/runtime/atomic", "And"}: struct{}{}, {"ppc64le", "internal/runtime/atomic", "And8"}: struct{}{}, {"ppc64le", "internal/runtime/atomic", "Cas"}: struct{}{}, @@ -1071,6 +1089,8 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"ppc64le", 
"sync/atomic", "SwapUint32"}: struct{}{}, {"ppc64le", "sync/atomic", "SwapUint64"}: struct{}{}, {"ppc64le", "sync/atomic", "SwapUintptr"}: struct{}{}, + {"ppc64le", "crypto/internal/constanttime", "Select"}: struct{}{}, + {"ppc64le", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, {"riscv64", "internal/runtime/atomic", "And"}: struct{}{}, {"riscv64", "internal/runtime/atomic", "And8"}: struct{}{}, {"riscv64", "internal/runtime/atomic", "Cas"}: struct{}{}, @@ -1191,6 +1211,7 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"riscv64", "sync/atomic", "SwapUint32"}: struct{}{}, {"riscv64", "sync/atomic", "SwapUint64"}: struct{}{}, {"riscv64", "sync/atomic", "SwapUintptr"}: struct{}{}, + {"riscv64", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, {"s390x", "internal/runtime/atomic", "And"}: struct{}{}, {"s390x", "internal/runtime/atomic", "And8"}: struct{}{}, {"s390x", "internal/runtime/atomic", "Cas"}: struct{}{}, @@ -1309,6 +1330,8 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"s390x", "sync/atomic", "SwapUint32"}: struct{}{}, {"s390x", "sync/atomic", "SwapUint64"}: struct{}{}, {"s390x", "sync/atomic", "SwapUintptr"}: struct{}{}, + {"s390x", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, + {"wasm", "internal/runtime/math", "Mul64"}: struct{}{}, {"wasm", "internal/runtime/sys", "GetCallerPC"}: struct{}{}, {"wasm", "internal/runtime/sys", "GetCallerSP"}: struct{}{}, {"wasm", "internal/runtime/sys", "GetClosurePtr"}: struct{}{}, @@ -1325,11 +1348,14 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"wasm", "math", "RoundToEven"}: struct{}{}, {"wasm", "math", "Trunc"}: struct{}{}, {"wasm", "math", "sqrt"}: struct{}{}, + {"wasm", "math/big", "mulWW"}: struct{}{}, {"wasm", "math/bits", "Len"}: struct{}{}, {"wasm", "math/bits", "Len16"}: struct{}{}, {"wasm", "math/bits", "Len32"}: struct{}{}, {"wasm", "math/bits", "Len64"}: struct{}{}, {"wasm", "math/bits", "Len8"}: struct{}{}, + {"wasm", "math/bits", "Mul"}: struct{}{}, + {"wasm", "math/bits", "Mul64"}: struct{}{}, {"wasm", "math/bits", "OnesCount"}: struct{}{}, {"wasm", "math/bits", "OnesCount16"}: struct{}{}, {"wasm", "math/bits", "OnesCount32"}: struct{}{}, @@ -1344,6 +1370,8 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"wasm", "math/bits", "TrailingZeros8"}: struct{}{}, {"wasm", "runtime", "KeepAlive"}: struct{}{}, {"wasm", "runtime", "slicebytetostringtmp"}: struct{}{}, + {"wasm", "crypto/internal/constanttime", "Select"}: struct{}{}, + {"wasm", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, } func TestIntrinsics(t *testing.T) { diff --git a/src/cmd/compile/internal/ssagen/phi.go b/src/cmd/compile/internal/ssagen/phi.go index 19b6920913d..4043ac45764 100644 --- a/src/cmd/compile/internal/ssagen/phi.go +++ b/src/cmd/compile/internal/ssagen/phi.go @@ -253,7 +253,7 @@ func (s *phiState) insertVarPhis(n int, var_ ir.Node, defs []*ssa.Block, typ *ty } // Add a phi to block c for variable n. hasPhi.add(c.ID) - v := c.NewValue0I(currentRoot.Pos, ssa.OpPhi, typ, int64(n)) // TODO: line number right? + v := c.NewValue0I(s.s.blockStarts[b.ID], ssa.OpPhi, typ, int64(n)) // Note: we store the variable number in the phi's AuxInt field. Used temporarily by phi building. 
if var_.Op() == ir.ONAME { s.s.addNamedValue(var_.(*ir.Name), v) @@ -396,11 +396,11 @@ type blockHeap struct { func (h *blockHeap) Len() int { return len(h.a) } func (h *blockHeap) Swap(i, j int) { a := h.a; a[i], a[j] = a[j], a[i] } -func (h *blockHeap) Push(x interface{}) { +func (h *blockHeap) Push(x any) { v := x.(*ssa.Block) h.a = append(h.a, v) } -func (h *blockHeap) Pop() interface{} { +func (h *blockHeap) Pop() any { old := h.a n := len(old) x := old[n-1] @@ -513,6 +513,7 @@ loop: v.Op = ssa.OpPhi v.AddArgs(args...) v.Aux = nil + v.Pos = s.s.blockStarts[b.ID] continue loop } w = a // save witness diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 37aad360f2a..e854cd98950 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -12,6 +12,7 @@ import ( "go/constant" "html" "internal/buildcfg" + "internal/runtime/gc" "os" "path/filepath" "slices" @@ -124,6 +125,15 @@ func InitConfig() { ir.Syms.Goschedguarded = typecheck.LookupRuntimeFunc("goschedguarded") ir.Syms.Growslice = typecheck.LookupRuntimeFunc("growslice") ir.Syms.InterfaceSwitch = typecheck.LookupRuntimeFunc("interfaceSwitch") + for i := 1; i < len(ir.Syms.MallocGCSmallNoScan); i++ { + ir.Syms.MallocGCSmallNoScan[i] = typecheck.LookupRuntimeFunc(fmt.Sprintf("mallocgcSmallNoScanSC%d", i)) + } + for i := 1; i < len(ir.Syms.MallocGCSmallScanNoHeader); i++ { + ir.Syms.MallocGCSmallScanNoHeader[i] = typecheck.LookupRuntimeFunc(fmt.Sprintf("mallocgcSmallScanNoHeaderSC%d", i)) + } + for i := 1; i < len(ir.Syms.MallocGCTiny); i++ { + ir.Syms.MallocGCTiny[i] = typecheck.LookupRuntimeFunc(fmt.Sprintf("mallocTiny%d", i)) + } ir.Syms.MallocGC = typecheck.LookupRuntimeFunc("mallocgc") ir.Syms.Memmove = typecheck.LookupRuntimeFunc("memmove") ir.Syms.Msanread = typecheck.LookupRuntimeFunc("msanread") @@ -695,7 +705,7 @@ func allocAlign(t *types.Type) int64 { func (s *state) newHeapaddr(n *ir.Name) { size := allocSize(n.Type()) if n.Type().HasPointers() || size >= maxAggregatedHeapAllocation || size == 0 { - s.setHeapaddr(n.Pos(), n, s.newObject(n.Type(), nil)) + s.setHeapaddr(n.Pos(), n, s.newObject(n.Type())) return } @@ -714,7 +724,7 @@ func (s *state) newHeapaddr(n *ir.Name) { // Make an allocation, but the type being allocated is just // the first pending object. We will come back and update it // later if needed. - allocCall = s.newObject(n.Type(), nil) + allocCall = s.newObjectNonSpecialized(n.Type(), nil) } else { allocCall = s.pendingHeapAllocations[0].Args[0] } @@ -767,7 +777,11 @@ func (s *state) flushPendingHeapAllocations() { s.constBool(true), // needZero TODO: false is ok? 
call.Args[1], // memory } - call.Aux = ssa.StaticAuxCall(ir.Syms.MallocGC, s.f.ABIDefault.ABIAnalyzeTypes( + mallocSym := ir.Syms.MallocGC + if specialMallocSym := s.specializedMallocSym(size, false); specialMallocSym != nil { + mallocSym = specialMallocSym + } + call.Aux = ssa.StaticAuxCall(mallocSym, s.f.ABIDefault.ABIAnalyzeTypes( []*types.Type{args[0].Type, args[1].Type, args[2].Type}, []*types.Type{types.Types[types.TUNSAFEPTR]}, )) @@ -779,6 +793,43 @@ func (s *state) flushPendingHeapAllocations() { ptr.Type = types.Types[types.TUNSAFEPTR] } +func (s *state) specializedMallocSym(size int64, hasPointers bool) *obj.LSym { + if !s.sizeSpecializedMallocEnabled() { + return nil + } + ptrSize := s.config.PtrSize + ptrBits := ptrSize * 8 + minSizeForMallocHeader := ptrSize * ptrBits + heapBitsInSpan := size <= minSizeForMallocHeader + if !heapBitsInSpan { + return nil + } + divRoundUp := func(n, a uintptr) uintptr { return (n + a - 1) / a } + sizeClass := gc.SizeToSizeClass8[divRoundUp(uintptr(size), gc.SmallSizeDiv)] + if hasPointers { + return ir.Syms.MallocGCSmallScanNoHeader[sizeClass] + } + if size < gc.TinySize { + return ir.Syms.MallocGCTiny[size] + } + return ir.Syms.MallocGCSmallNoScan[sizeClass] +} + +func (s *state) sizeSpecializedMallocEnabled() bool { + if base.Flag.CompilingRuntime { + // The compiler forces the values of the asan, msan, and race flags to false if + // we're compiling the runtime, so we lose the information about whether we're + // building in asan, msan, or race mode. Because the specialized functions don't + // work in that mode, just turn it off in that case. + // TODO(matloob): Save the information about whether the flags were passed in + // originally so we can turn off size specialized malloc in that case instead of + // using Instrumenting below. Then we can remove this condition. + return false + } + + return buildcfg.Experiment.SizeSpecializedMalloc && !base.Flag.Cfg.Instrumenting +} + // setHeapaddr allocates a new PAUTO variable to store ptr (which must be non-nil) // and then sets it as n's heap address. func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) { @@ -801,7 +852,24 @@ func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) { } // newObject returns an SSA value denoting new(typ). -func (s *state) newObject(typ *types.Type, rtype *ssa.Value) *ssa.Value { +func (s *state) newObject(typ *types.Type) *ssa.Value { + if typ.Size() == 0 { + return s.newValue1A(ssa.OpAddr, types.NewPtr(typ), ir.Syms.Zerobase, s.sb) + } + rtype := s.reflectType(typ) + if specialMallocSym := s.specializedMallocSym(typ.Size(), typ.HasPointers()); specialMallocSym != nil { + return s.rtcall(specialMallocSym, true, []*types.Type{types.NewPtr(typ)}, + s.constInt(types.Types[types.TUINTPTR], typ.Size()), + rtype, + s.constBool(true), + )[0] + } + return s.rtcall(ir.Syms.Newobject, true, []*types.Type{types.NewPtr(typ)}, rtype)[0] +} + +// newObjectNonSpecialized returns an SSA value denoting new(typ). It does +// not call the size-specialized malloc functions. +func (s *state) newObjectNonSpecialized(typ *types.Type, rtype *ssa.Value) *ssa.Value { if typ.Size() == 0 { return s.newValue1A(ssa.OpAddr, types.NewPtr(typ), ir.Syms.Zerobase, s.sb) } @@ -1025,6 +1093,9 @@ type state struct { // First argument of append calls that could be stack allocated. appendTargets map[ir.Node]bool + + // Block starting position, indexed by block id.
+ blockStarts []src.XPos } type funcLine struct { @@ -1049,13 +1120,13 @@ func (s *state) label(sym *types.Sym) *ssaLabel { return lab } -func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) } -func (s *state) Log() bool { return s.f.Log() } -func (s *state) Fatalf(msg string, args ...interface{}) { +func (s *state) Logf(msg string, args ...any) { s.f.Logf(msg, args...) } +func (s *state) Log() bool { return s.f.Log() } +func (s *state) Fatalf(msg string, args ...any) { s.f.Frontend().Fatalf(s.peekPos(), msg, args...) } -func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) } -func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() } +func (s *state) Warnl(pos src.XPos, msg string, args ...any) { s.f.Warnl(pos, msg, args...) } +func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() } func ssaMarker(name string) *ir.Name { return ir.NewNameAt(base.Pos, &types.Sym{Name: name}, nil) } @@ -1083,6 +1154,9 @@ func (s *state) startBlock(b *ssa.Block) { s.curBlock = b s.vars = map[ir.Node]*ssa.Value{} clear(s.fwdVars) + for len(s.blockStarts) <= int(b.ID) { + s.blockStarts = append(s.blockStarts, src.NoXPos) + } } // endBlock marks the end of generating code for the current block. @@ -1109,6 +1183,9 @@ func (s *state) endBlock() *ssa.Block { b.Pos = src.NoXPos } else { b.Pos = s.lastPos + if s.blockStarts[b.ID] == src.NoXPos { + s.blockStarts[b.ID] = s.lastPos + } } return b } @@ -1125,6 +1202,11 @@ func (s *state) pushLine(line src.XPos) { } else { s.lastPos = line } + // The first position we see for a new block is its starting position + // (the line number for its phis, if any). + if b := s.curBlock; b != nil && s.blockStarts[b.ID] == src.NoXPos { + s.blockStarts[b.ID] = line + } s.line = append(s.line, line) } @@ -2584,13 +2666,13 @@ var fpConvOpToSSA = map[twoTypes]twoOpsAndType{ {types.TFLOAT32, types.TUINT8}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32}, {types.TFLOAT32, types.TUINT16}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32}, - {types.TFLOAT32, types.TUINT32}: {ssa.OpCvt32Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned - {types.TFLOAT32, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt32Fto64U, branchy code expansion instead + {types.TFLOAT32, types.TUINT32}: {ssa.OpInvalid, ssa.OpCopy, types.TINT64}, // Cvt32Fto32U, branchy code expansion instead + {types.TFLOAT32, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt32Fto64U, branchy code expansion instead {types.TFLOAT64, types.TUINT8}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32}, {types.TFLOAT64, types.TUINT16}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32}, - {types.TFLOAT64, types.TUINT32}: {ssa.OpCvt64Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned - {types.TFLOAT64, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt64Fto64U, branchy code expansion instead + {types.TFLOAT64, types.TUINT32}: {ssa.OpInvalid, ssa.OpCopy, types.TINT64}, // Cvt64Fto32U, branchy code expansion instead + {types.TFLOAT64, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt64Fto64U, branchy code expansion instead // float {types.TFLOAT64, types.TFLOAT32}: {ssa.OpCvt64Fto32F, ssa.OpCopy, types.TFLOAT32}, @@ -2807,7 +2889,19 @@ func (s *state) conv(n ir.Node, v *ssa.Value, ft, tt *types.Type) *ssa.Value { } if ft.IsFloat() || tt.IsFloat() { - conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft),
s.concreteEtype(tt)}] + cft, ctt := s.concreteEtype(ft), s.concreteEtype(tt) + conv, ok := fpConvOpToSSA[twoTypes{cft, ctt}] + // There's a change to the conversion-op table; this restores the old behavior when the ConvertHash debug hash does not match. + // use salted hash to distinguish an unsigned convert from a signed convert at the same Pos + if ctt == types.TUINT32 && ft.IsFloat() && !base.ConvertHash.MatchPosWithInfo(n.Pos(), "U", nil) { + // revert to old behavior + conv.op1 = ssa.OpCvt64Fto64 + if cft == types.TFLOAT32 { + conv.op1 = ssa.OpCvt32Fto64 + } + conv.op2 = ssa.OpTrunc64to32 + + } if s.config.RegSize == 4 && Arch.LinkArch.Family != sys.MIPS && !s.softFloat { if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 { conv = conv1 @@ -2870,10 +2964,23 @@ func (s *state) conv(n ir.Node, v *ssa.Value, ft, tt *types.Type) *ssa.Value { } // ft is float32 or float64, and tt is unsigned integer if ft.Size() == 4 { - return s.float32ToUint64(n, v, ft, tt) + switch tt.Size() { + case 8: + return s.float32ToUint64(n, v, ft, tt) + case 4, 2, 1: + // TODO should 2 and 1 saturate or truncate? + return s.float32ToUint32(n, v, ft, tt) + } } if ft.Size() == 8 { - return s.float64ToUint64(n, v, ft, tt) + switch tt.Size() { + case 8: + return s.float64ToUint64(n, v, ft, tt) + case 4, 2, 1: + // TODO should 2 and 1 saturate or truncate? + return s.float64ToUint32(n, v, ft, tt) + } + + } s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt) return nil @@ -3591,11 +3698,10 @@ func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value { case ir.ONEW: n := n.(*ir.UnaryExpr) - var rtype *ssa.Value if x, ok := n.X.(*ir.DynamicType); ok && x.Op() == ir.ODYNAMICTYPE { - rtype = s.expr(x.RType) + return s.newObjectNonSpecialized(n.Type().Elem(), s.expr(x.RType)) } - return s.newObject(n.Type().Elem(), rtype) + return s.newObject(n.Type().Elem()) case ir.OUNSAFEADD: n := n.(*ir.BinaryExpr) @@ -5565,7 +5671,9 @@ func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n ir.Node, x *ssa.Value, ft, // equal to 10000000001; that rounds up, and the 1 cannot // be lost else it would round down if the LSB of the // candidate mantissa is 0.
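// A runnable model of the float-to-unsigned lowering gated above and spelled
// out in floatToUint below (an assumption-level sketch: the helper name is
// invented and only the float64-to-uint32 case is shown). In-range values use
// the signed convert; the new ConvertHash-gated path clamps negatives to zero
// instead of wrapping; values at or above 2^31 are rebiased and the top bit
// is restored; NaN reaches the else arm and, per the Go spec, produces an
// implementation-dependent value.
//
//	func f64toU32(x float64) uint32 {
//		const cutoff = 1 << 31
//		if x < float64(cutoff) { // bThen
//			if x < 0 { // bZero: clamp rather than wrap
//				return 0
//			}
//			return uint32(int32(x)) // exact: the value fits in int32 range
//		}
//		z := uint32(int32(x - float64(cutoff))) // bElse: rebias by 2^31
//		var top uint32 = cutoff
//		return z | -top // -top == 0x80000000: restore the high bit
//	}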
+ cmp := s.newValue2(cvttab.leq, types.Types[types.TBOOL], s.zeroVal(ft), x) + b := s.endBlock() b.Kind = ssa.BlockIf b.SetControl(cmp) @@ -5791,34 +5899,63 @@ func (s *state) float64ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ss func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { // cutoff:=1<<(intY_Size-1) // if x < floatX(cutoff) { - // result = uintY(x) + // result = uintY(x) // bThen + // // gated by ConvertHash, clamp negative inputs to zero + // if x < 0 { // unlikely + // result = 0 // bZero + // } // } else { - // y = x - floatX(cutoff) + // y = x - floatX(cutoff) // bElse // z = uintY(y) // result = z | -(cutoff) // } + cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff)) - cmp := s.newValue2(cvttab.ltf, types.Types[types.TBOOL], x, cutoff) + cmp := s.newValueOrSfCall2(cvttab.ltf, types.Types[types.TBOOL], x, cutoff) b := s.endBlock() b.Kind = ssa.BlockIf b.SetControl(cmp) b.Likely = ssa.BranchLikely - bThen := s.f.NewBlock(ssa.BlockPlain) + var bThen, bZero *ssa.Block + // use salted hash to distinguish an unsigned convert from a signed convert at the same Pos + newConversion := base.ConvertHash.MatchPosWithInfo(n.Pos(), "U", nil) + if newConversion { + bZero = s.f.NewBlock(ssa.BlockPlain) + bThen = s.f.NewBlock(ssa.BlockIf) + } else { + bThen = s.f.NewBlock(ssa.BlockPlain) + } + bElse := s.f.NewBlock(ssa.BlockPlain) bAfter := s.f.NewBlock(ssa.BlockPlain) b.AddEdgeTo(bThen) s.startBlock(bThen) - a0 := s.newValue1(cvttab.cvt2U, tt, x) + a0 := s.newValueOrSfCall1(cvttab.cvt2U, tt, x) s.vars[n] = a0 - s.endBlock() - bThen.AddEdgeTo(bAfter) + + if newConversion { + cmpz := s.newValueOrSfCall2(cvttab.ltf, types.Types[types.TBOOL], x, cvttab.floatValue(s, ft, 0.0)) + s.endBlock() + bThen.SetControl(cmpz) + bThen.AddEdgeTo(bZero) + bThen.Likely = ssa.BranchUnlikely + bThen.AddEdgeTo(bAfter) + + s.startBlock(bZero) + s.vars[n] = cvttab.intValue(s, tt, 0) + s.endBlock() + bZero.AddEdgeTo(bAfter) + } else { + s.endBlock() + bThen.AddEdgeTo(bAfter) + } b.AddEdgeTo(bElse) s.startBlock(bElse) - y := s.newValue2(cvttab.subf, ft, x, cutoff) - y = s.newValue1(cvttab.cvt2U, tt, y) + y := s.newValueOrSfCall2(cvttab.subf, ft, x, cutoff) + y = s.newValueOrSfCall1(cvttab.cvt2U, tt, y) z := cvttab.intValue(s, tt, int64(-cvttab.cutoff)) a1 := s.newValue2(cvttab.or, tt, y, z) s.vars[n] = a1 @@ -5839,6 +5976,25 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val if n.ITab != nil { targetItab = s.expr(n.ITab) } + + if n.UseNilPanic { + if commaok { + base.Fatalf("unexpected *ir.TypeAssertExpr with UseNilPanic == true && commaok == true") + } + if n.Type().IsInterface() { + // Currently we do not expect the compiler to emit type asserts with UseNilPanic that assert to an interface type. + // If needed, this can be relaxed in the future, but for now we can assert that. + base.Fatalf("unexpected *ir.TypeAssertExpr with UseNilPanic == true && Type().IsInterface() == true") + } + typs := s.f.Config.Types + iface = s.newValue2( + ssa.OpIMake, + iface.Type, + s.nilCheck(s.newValue1(ssa.OpITab, typs.BytePtr, iface)), + s.newValue1(ssa.OpIData, typs.BytePtr, iface), + ) + } + return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, nil, target, targetItab, commaok, n.Descriptor) } @@ -7570,7 +7726,7 @@ func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t } // Logf logs a message from the compiler.
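// The signature rewrites below, like the syntax-package hunks that follow,
// are mechanical interface{} -> any cleanups: any has been a predeclared
// alias (type any = interface{}) since Go 1.18, so the old and new function
// types are identical and no call site changes. A minimal demonstration
// (standalone sketch, not code from this change):
//
//	func logf(msg string, args ...interface{}) {}
//	var f func(msg string, args ...any) = logf // legal: the types are identical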
-func (e *ssafn) Logf(msg string, args ...interface{}) { +func (e *ssafn) Logf(msg string, args ...any) { if e.log { fmt.Printf(msg, args...) } @@ -7581,15 +7737,15 @@ func (e *ssafn) Log() bool { } // Fatalf reports a compiler error and exits. -func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) { +func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...any) { base.Pos = pos - nargs := append([]interface{}{ir.FuncName(e.curfn)}, args...) + nargs := append([]any{ir.FuncName(e.curfn)}, args...) base.Fatalf("'%s': "+msg, nargs...) } // Warnl reports a "warning", which is usually flag-triggered // logging output for the benefit of tests. -func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) { +func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...any) { base.WarnfAt(pos, fmt_, args...) } diff --git a/src/cmd/compile/internal/syntax/branches.go b/src/cmd/compile/internal/syntax/branches.go index 8b360176e86..3a2479bb8a1 100644 --- a/src/cmd/compile/internal/syntax/branches.go +++ b/src/cmd/compile/internal/syntax/branches.go @@ -61,7 +61,7 @@ type block struct { lstmt *LabeledStmt // labeled statement associated with this block, or nil } -func (ls *labelScope) errf(pos Pos, format string, args ...interface{}) { +func (ls *labelScope) errf(pos Pos, format string, args ...any) { ls.errh(Error{pos, fmt.Sprintf(format, args...)}) } diff --git a/src/cmd/compile/internal/syntax/dumper.go b/src/cmd/compile/internal/syntax/dumper.go index d5247886dae..9a021a45829 100644 --- a/src/cmd/compile/internal/syntax/dumper.go +++ b/src/cmd/compile/internal/syntax/dumper.go @@ -89,7 +89,7 @@ type writeError struct { } // printf is a convenience wrapper that takes care of print errors. -func (p *dumper) printf(format string, args ...interface{}) { +func (p *dumper) printf(format string, args ...any) { if _, err := fmt.Fprintf(p, format, args...); err != nil { panic(writeError{err}) } diff --git a/src/cmd/compile/internal/syntax/printer.go b/src/cmd/compile/internal/syntax/printer.go index d86d77e73f7..86d93e8932d 100644 --- a/src/cmd/compile/internal/syntax/printer.go +++ b/src/cmd/compile/internal/syntax/printer.go @@ -247,7 +247,7 @@ func mayCombine(prev token, next byte) (b bool) { // return } -func (p *printer) print(args ...interface{}) { +func (p *printer) print(args ...any) { for i := 0; i < len(args); i++ { switch x := args[i].(type) { case nil: @@ -455,7 +455,7 @@ func (p *printer) printRawNode(n Node) { p.printExprList(n.ElemList) case *ArrayType: - var len interface{} = _DotDotDot + var len any = _DotDotDot if n.Len != nil { len = n.Len } diff --git a/src/cmd/compile/internal/syntax/scanner.go b/src/cmd/compile/internal/syntax/scanner.go index 807d8383866..700908f6bda 100644 --- a/src/cmd/compile/internal/syntax/scanner.go +++ b/src/cmd/compile/internal/syntax/scanner.go @@ -50,12 +50,12 @@ func (s *scanner) init(src io.Reader, errh func(line, col uint, msg string), mod } // errorf reports an error at the most recently read character position. -func (s *scanner) errorf(format string, args ...interface{}) { +func (s *scanner) errorf(format string, args ...any) { s.error(fmt.Sprintf(format, args...)) } // errorAtf reports an error at a byte column offset relative to the current token start. 
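// Looking ahead to the new float_test.go cases below, these are the IEEE-754
// facts they pin down (a background sketch, not code from this change): NaN
// compares unordered, so every <, <=, >, >= involving NaN is false, and
// x != x is true exactly for NaN. A disjunct such as "x > 0 || x != x"
// therefore accepts positives through its first leg and NaN through its
// second, the shape the backend can now recognize and fuse into a single
// floating-point check.
//
//	func isNaN64(x float64) bool { return x != x } // classic NaN test
//	func isNaNOrPositive(x float64) bool { return x > 0 || x != x }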
-func (s *scanner) errorAtf(offset int, format string, args ...interface{}) { +func (s *scanner) errorAtf(offset int, format string, args ...any) { s.errh(s.line, s.col+uint(offset), fmt.Sprintf(format, args...)) } diff --git a/src/cmd/compile/internal/syntax/syntax.go b/src/cmd/compile/internal/syntax/syntax.go index 83b102da9f5..dd8f2b82004 100644 --- a/src/cmd/compile/internal/syntax/syntax.go +++ b/src/cmd/compile/internal/syntax/syntax.go @@ -36,7 +36,7 @@ type ErrorHandler func(err error) // A Pragma value augments a package, import, const, func, type, or var declaration. // Its meaning is entirely up to the PragmaHandler, // except that nil is used to mean “no pragma seen.” -type Pragma interface{} +type Pragma any // A PragmaHandler is used to process //go: directives while scanning. // It is passed the current pragma value, which starts out being nil, diff --git a/src/cmd/compile/internal/test/divconst_test.go b/src/cmd/compile/internal/test/divconst_test.go index 9358a603749..5e89ce9a3d3 100644 --- a/src/cmd/compile/internal/test/divconst_test.go +++ b/src/cmd/compile/internal/test/divconst_test.go @@ -99,28 +99,28 @@ func BenchmarkDivconstU64(b *testing.B) { x := uint64(123456789123456789) for i := 0; i < b.N; i++ { x += x << 4 - u64res = uint64(x) / 3 + u64res = x / 3 } }) b.Run("5", func(b *testing.B) { x := uint64(123456789123456789) for i := 0; i < b.N; i++ { x += x << 4 - u64res = uint64(x) / 5 + u64res = x / 5 } }) b.Run("37", func(b *testing.B) { x := uint64(123456789123456789) for i := 0; i < b.N; i++ { x += x << 4 - u64res = uint64(x) / 37 + u64res = x / 37 } }) b.Run("1234567", func(b *testing.B) { x := uint64(123456789123456789) for i := 0; i < b.N; i++ { x += x << 4 - u64res = uint64(x) / 1234567 + u64res = x / 1234567 } }) } diff --git a/src/cmd/compile/internal/test/fixedbugs_test.go b/src/cmd/compile/internal/test/fixedbugs_test.go index 8ff7a60aae6..b6d3e248ad0 100644 --- a/src/cmd/compile/internal/test/fixedbugs_test.go +++ b/src/cmd/compile/internal/test/fixedbugs_test.go @@ -24,7 +24,7 @@ func makeT() T { var g T -var sink interface{} +var sink any func TestIssue15854(t *testing.T) { for i := 0; i < 10000; i++ { diff --git a/src/cmd/compile/internal/test/float_test.go b/src/cmd/compile/internal/test/float_test.go index 9e61148c529..00735e3cb11 100644 --- a/src/cmd/compile/internal/test/float_test.go +++ b/src/cmd/compile/internal/test/float_test.go @@ -623,6 +623,169 @@ func TestInf(t *testing.T) { } } +//go:noinline +func isNaNOrGtZero64(x float64) bool { + return math.IsNaN(x) || x > 0 +} + +//go:noinline +func isNaNOrGteZero64(x float64) bool { + return x >= 0 || math.IsNaN(x) +} + +//go:noinline +func isNaNOrLtZero64(x float64) bool { + return x < 0 || math.IsNaN(x) +} + +//go:noinline +func isNaNOrLteZero64(x float64) bool { + return math.IsNaN(x) || x <= 0 +} + +func TestFusedNaNChecks64(t *testing.T) { + tests := []struct { + value float64 + isZero bool + isGreaterThanZero bool + isLessThanZero bool + isNaN bool + }{ + {value: 0.0, isZero: true}, + {value: math.Copysign(0, -1), isZero: true}, + {value: 1.0, isGreaterThanZero: true}, + {value: -1.0, isLessThanZero: true}, + {value: math.Inf(1), isGreaterThanZero: true}, + {value: math.Inf(-1), isLessThanZero: true}, + {value: math.NaN(), isNaN: true}, + } + + check := func(name string, f func(x float64) bool, value float64, want bool) { + got := f(value) + if got != want { + t.Errorf("%v(%g): want %v, got %v", name, value, want, got) + } + } + + for _, test := range tests { + check("isNaNOrGtZero64", 
isNaNOrGtZero64, test.value, test.isNaN || test.isGreaterThanZero) + check("isNaNOrGteZero64", isNaNOrGteZero64, test.value, test.isNaN || test.isGreaterThanZero || test.isZero) + check("isNaNOrLtZero64", isNaNOrLtZero64, test.value, test.isNaN || test.isLessThanZero) + check("isNaNOrLteZero64", isNaNOrLteZero64, test.value, test.isNaN || test.isLessThanZero || test.isZero) + } +} + +//go:noinline +func isNaNOrGtZero32(x float32) bool { + return x > 0 || x != x +} + +//go:noinline +func isNaNOrGteZero32(x float32) bool { + return x != x || x >= 0 +} + +//go:noinline +func isNaNOrLtZero32(x float32) bool { + return x != x || x < 0 +} + +//go:noinline +func isNaNOrLteZero32(x float32) bool { + return x <= 0 || x != x +} + +func TestFusedNaNChecks32(t *testing.T) { + tests := []struct { + value float32 + isZero bool + isGreaterThanZero bool + isLessThanZero bool + isNaN bool + }{ + {value: 0.0, isZero: true}, + {value: float32(math.Copysign(0, -1)), isZero: true}, + {value: 1.0, isGreaterThanZero: true}, + {value: -1.0, isLessThanZero: true}, + {value: float32(math.Inf(1)), isGreaterThanZero: true}, + {value: float32(math.Inf(-1)), isLessThanZero: true}, + {value: float32(math.NaN()), isNaN: true}, + } + + check := func(name string, f func(x float32) bool, value float32, want bool) { + got := f(value) + if got != want { + t.Errorf("%v(%g): want %v, got %v", name, value, want, got) + } + } + + for _, test := range tests { + check("isNaNOrGtZero32", isNaNOrGtZero32, test.value, test.isNaN || test.isGreaterThanZero) + check("isNaNOrGteZero32", isNaNOrGteZero32, test.value, test.isNaN || test.isGreaterThanZero || test.isZero) + check("isNaNOrLtZero32", isNaNOrLtZero32, test.value, test.isNaN || test.isLessThanZero) + check("isNaNOrLteZero32", isNaNOrLteZero32, test.value, test.isNaN || test.isLessThanZero || test.isZero) + } +} + +// minNormal64 is the smallest float64 value that is not subnormal. 
+const minNormal64 = 2.2250738585072014e-308 + +//go:noinline +func isAbsLessThanMinNormal64(x float64) bool { + return math.Abs(x) < minNormal64 +} + +//go:noinline +func isLessThanMinNormal64(x float64) bool { + return x < minNormal64 +} + +//go:noinline +func isGreaterThanNegMinNormal64(x float64) bool { + return x > -minNormal64 +} + +//go:noinline +func isGreaterThanOrEqualToMinNormal64(x float64) bool { + return math.Abs(x) >= minNormal64 +} + +func TestSubnormalComparisons(t *testing.T) { + tests := []struct { + value float64 + isAbsLessThanMinNormal bool + isPositive bool + isNegative bool + isNaN bool + }{ + {value: math.Inf(1), isPositive: true}, + {value: math.MaxFloat64, isPositive: true}, + {value: math.Inf(-1), isNegative: true}, + {value: -math.MaxFloat64, isNegative: true}, + {value: math.NaN(), isNaN: true}, + {value: minNormal64, isPositive: true}, + {value: minNormal64 / 2, isAbsLessThanMinNormal: true, isPositive: true}, + {value: -minNormal64, isNegative: true}, + {value: -minNormal64 / 2, isAbsLessThanMinNormal: true, isNegative: true}, + {value: 0, isAbsLessThanMinNormal: true, isPositive: true}, + {value: math.Copysign(0, -1), isAbsLessThanMinNormal: true, isNegative: true}, + } + + check := func(name string, f func(x float64) bool, value float64, want bool) { + got := f(value) + if got != want { + t.Errorf("%v(%g): want %v, got %v", name, value, want, got) + } + } + + for _, test := range tests { + check("isAbsLessThanMinNormal64", isAbsLessThanMinNormal64, test.value, test.isAbsLessThanMinNormal) + check("isLessThanMinNormal64", isLessThanMinNormal64, test.value, test.isAbsLessThanMinNormal || test.isNegative) + check("isGreaterThanNegMinNormal64", isGreaterThanNegMinNormal64, test.value, test.isAbsLessThanMinNormal || test.isPositive) + check("isGreaterThanOrEqualToMinNormal64", isGreaterThanOrEqualToMinNormal64, test.value, !test.isAbsLessThanMinNormal && !test.isNaN) + } +} + var sinkFloat float64 func BenchmarkMul2(b *testing.B) { diff --git a/src/cmd/compile/internal/test/iface_test.go b/src/cmd/compile/internal/test/iface_test.go index db41eb8e55c..cb7dc70c2ff 100644 --- a/src/cmd/compile/internal/test/iface_test.go +++ b/src/cmd/compile/internal/test/iface_test.go @@ -13,7 +13,7 @@ var x int func TestEfaceConv1(t *testing.T) { a := 5 - i := interface{}(a) + i := any(a) a += 2 if got := i.(int); got != 5 { t.Errorf("wanted 5, got %d\n", got) @@ -23,7 +23,7 @@ func TestEfaceConv1(t *testing.T) { func TestEfaceConv2(t *testing.T) { a := 5 sink = &a - i := interface{}(a) + i := any(a) a += 2 if got := i.(int); got != 5 { t.Errorf("wanted 5, got %d\n", got) @@ -38,7 +38,7 @@ func TestEfaceConv3(t *testing.T) { } //go:noinline -func e2int3(i interface{}) int { +func e2int3(i any) int { x = 7 return i.(int) } @@ -51,7 +51,7 @@ func TestEfaceConv4(t *testing.T) { } //go:noinline -func e2int4(i interface{}, p *int) int { +func e2int4(i any, p *int) int { *p = 7 return i.(int) } @@ -69,7 +69,7 @@ func (i Int) foo() { func TestIfaceConv1(t *testing.T) { a := Int(5) - i := interface{}(a) + i := any(a) a += 2 if got := i.(Int); got != 5 { t.Errorf("wanted 5, got %d\n", int(got)) @@ -79,7 +79,7 @@ func TestIfaceConv1(t *testing.T) { func TestIfaceConv2(t *testing.T) { a := Int(5) sink = &a - i := interface{}(a) + i := any(a) a += 2 if got := i.(Int); got != 5 { t.Errorf("wanted 5, got %d\n", int(got)) @@ -121,7 +121,7 @@ func BenchmarkEfaceInteger(b *testing.B) { } //go:noinline -func i2int(i interface{}) int { +func i2int(i any) int { return i.(int) } diff --git 
a/src/cmd/compile/internal/test/move_test.go b/src/cmd/compile/internal/test/move_test.go new file mode 100644 index 00000000000..f361a865391 --- /dev/null +++ b/src/cmd/compile/internal/test/move_test.go @@ -0,0 +1,55 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import "testing" + +var ( + n = [16]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + m = [16]int{2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32} +) + +func TestEqual(t *testing.T) { + if r := move2(n, m, 0); r != n { + t.Fatalf("%v != %v", r, n) + } + if r := move2(n, m, 1); r != m { + t.Fatalf("%v != %v", r, m) + } + if r := move2p(n, m, 0); r != n { + t.Fatalf("%v != %v", r, n) + } + if r := move2p(n, m, 1); r != m { + t.Fatalf("%v != %v", r, m) + } +} + +//go:noinline +func move2(a, b [16]int, c int) [16]int { + e := a + f := b + var d [16]int + if c%2 == 0 { + d = e + } else { + d = f + } + r := d + return r +} + +//go:noinline +func move2p(a, b [16]int, c int) [16]int { + e := a + f := b + var p *[16]int + if c%2 == 0 { + p = &e + } else { + p = &f + } + r := *p + return r +} diff --git a/src/cmd/compile/internal/test/shift_test.go b/src/cmd/compile/internal/test/shift_test.go index 492379e188c..d540f25c73a 100644 --- a/src/cmd/compile/internal/test/shift_test.go +++ b/src/cmd/compile/internal/test/shift_test.go @@ -916,7 +916,7 @@ func TestShiftGeneric(t *testing.T) { signed bool shiftWidth int left bool - f interface{} + f any }{ {64, true, 64, true, func(n int64, s uint64) int64 { return n << s }}, {64, true, 64, false, func(n int64, s uint64) int64 { return n >> s }}, diff --git a/src/cmd/compile/internal/test/testdata/arith_test.go b/src/cmd/compile/internal/test/testdata/arith_test.go index cd7b5bc2c4a..8984cd3e268 100644 --- a/src/cmd/compile/internal/test/testdata/arith_test.go +++ b/src/cmd/compile/internal/test/testdata/arith_test.go @@ -1390,11 +1390,17 @@ func div19_int64(n int64) bool { return n%19 == 0 } +var ( + // These have to be global to avoid getting constant-folded in the function body: + // as locals, prove can see that they are actually constants. + sixU, nineteenU uint64 = 6, 19 + sixS, nineteenS int64 = 6, 19 +) + // testDivisibility confirms that rewrite rules x%c ==0 for c constant are correct. 
func testDivisibility(t *testing.T) { // unsigned tests // test an even and an odd divisor - var sixU, nineteenU uint64 = 6, 19 // test all inputs for uint8, uint16 for i := uint64(0); i <= math.MaxUint16; i++ { if i <= math.MaxUint8 { @@ -1402,7 +1408,7 @@ func testDivisibility(t *testing.T) { t.Errorf("div6_uint8(%d) = %v want %v", i, got, want) } if want, got := uint8(i)%uint8(nineteenU) == 0, div19_uint8(uint8(i)); got != want { - t.Errorf("div6_uint19(%d) = %v want %v", i, got, want) + t.Errorf("div19_uint8(%d) = %v want %v", i, got, want) } } if want, got := uint16(i)%uint16(sixU) == 0, div6_uint16(uint16(i)); got != want { @@ -1450,7 +1456,6 @@ func testDivisibility(t *testing.T) { // signed tests // test an even and an odd divisor - var sixS, nineteenS int64 = 6, 19 // test all inputs for int8, int16 for i := int64(math.MinInt16); i <= math.MaxInt16; i++ { if math.MinInt8 <= i && i <= math.MaxInt8 { @@ -1458,7 +1463,7 @@ func testDivisibility(t *testing.T) { t.Errorf("div6_int8(%d) = %v want %v", i, got, want) } if want, got := int8(i)%int8(nineteenS) == 0, div19_int8(int8(i)); got != want { - t.Errorf("div6_int19(%d) = %v want %v", i, got, want) + t.Errorf("div19_int8(%d) = %v want %v", i, got, want) } } if want, got := int16(i)%int16(sixS) == 0, div6_int16(int16(i)); got != want { diff --git a/src/cmd/compile/internal/typecheck/_builtin/runtime.go b/src/cmd/compile/internal/typecheck/_builtin/runtime.go index 1e4d0b7db6e..3c9707252ec 100644 --- a/src/cmd/compile/internal/typecheck/_builtin/runtime.go +++ b/src/cmd/compile/internal/typecheck/_builtin/runtime.go @@ -49,11 +49,13 @@ func goPanicSlice3CU(x uint, y int) func goPanicSliceConvert(x int, y int) func printbool(bool) -func printfloat(float64) +func printfloat64(float64) +func printfloat32(float32) func printint(int64) func printhex(uint64) func printuint(uint64) -func printcomplex(complex128) +func printcomplex128(complex128) +func printcomplex64(complex64) func printstring(string) func printpointer(any) func printuintptr(uintptr) diff --git a/src/cmd/compile/internal/typecheck/builtin.go b/src/cmd/compile/internal/typecheck/builtin.go index 6b8c6d7bad5..eea7fd7d050 100644 --- a/src/cmd/compile/internal/typecheck/builtin.go +++ b/src/cmd/compile/internal/typecheck/builtin.go @@ -56,182 +56,184 @@ var runtimeDecls = [...]struct { {"goPanicSlice3CU", funcTag, 16}, {"goPanicSliceConvert", funcTag, 14}, {"printbool", funcTag, 17}, - {"printfloat", funcTag, 19}, - {"printint", funcTag, 21}, - {"printhex", funcTag, 23}, - {"printuint", funcTag, 23}, - {"printcomplex", funcTag, 25}, - {"printstring", funcTag, 27}, - {"printpointer", funcTag, 28}, - {"printuintptr", funcTag, 29}, - {"printiface", funcTag, 28}, - {"printeface", funcTag, 28}, - {"printslice", funcTag, 28}, + {"printfloat64", funcTag, 19}, + {"printfloat32", funcTag, 21}, + {"printint", funcTag, 23}, + {"printhex", funcTag, 25}, + {"printuint", funcTag, 25}, + {"printcomplex128", funcTag, 27}, + {"printcomplex64", funcTag, 29}, + {"printstring", funcTag, 31}, + {"printpointer", funcTag, 32}, + {"printuintptr", funcTag, 33}, + {"printiface", funcTag, 32}, + {"printeface", funcTag, 32}, + {"printslice", funcTag, 32}, {"printnl", funcTag, 9}, {"printsp", funcTag, 9}, {"printlock", funcTag, 9}, {"printunlock", funcTag, 9}, - {"concatstring2", funcTag, 32}, - {"concatstring3", funcTag, 33}, - {"concatstring4", funcTag, 34}, - {"concatstring5", funcTag, 35}, - {"concatstrings", funcTag, 37}, - {"concatbyte2", funcTag, 39}, - {"concatbyte3", funcTag, 40}, - {"concatbyte4", 
funcTag, 41}, - {"concatbyte5", funcTag, 42}, - {"concatbytes", funcTag, 43}, - {"cmpstring", funcTag, 44}, - {"intstring", funcTag, 47}, - {"slicebytetostring", funcTag, 48}, - {"slicebytetostringtmp", funcTag, 49}, - {"slicerunetostring", funcTag, 52}, - {"stringtoslicebyte", funcTag, 53}, - {"stringtoslicerune", funcTag, 56}, - {"slicecopy", funcTag, 57}, - {"decoderune", funcTag, 58}, - {"countrunes", funcTag, 59}, - {"convT", funcTag, 60}, - {"convTnoptr", funcTag, 60}, - {"convT16", funcTag, 62}, - {"convT32", funcTag, 64}, - {"convT64", funcTag, 65}, - {"convTstring", funcTag, 66}, - {"convTslice", funcTag, 69}, - {"assertE2I", funcTag, 70}, - {"assertE2I2", funcTag, 70}, - {"panicdottypeE", funcTag, 71}, - {"panicdottypeI", funcTag, 71}, - {"panicnildottype", funcTag, 72}, - {"typeAssert", funcTag, 70}, - {"interfaceSwitch", funcTag, 73}, - {"ifaceeq", funcTag, 75}, - {"efaceeq", funcTag, 75}, - {"panicrangestate", funcTag, 76}, + {"concatstring2", funcTag, 36}, + {"concatstring3", funcTag, 37}, + {"concatstring4", funcTag, 38}, + {"concatstring5", funcTag, 39}, + {"concatstrings", funcTag, 41}, + {"concatbyte2", funcTag, 43}, + {"concatbyte3", funcTag, 44}, + {"concatbyte4", funcTag, 45}, + {"concatbyte5", funcTag, 46}, + {"concatbytes", funcTag, 47}, + {"cmpstring", funcTag, 48}, + {"intstring", funcTag, 51}, + {"slicebytetostring", funcTag, 52}, + {"slicebytetostringtmp", funcTag, 53}, + {"slicerunetostring", funcTag, 56}, + {"stringtoslicebyte", funcTag, 57}, + {"stringtoslicerune", funcTag, 60}, + {"slicecopy", funcTag, 61}, + {"decoderune", funcTag, 62}, + {"countrunes", funcTag, 63}, + {"convT", funcTag, 64}, + {"convTnoptr", funcTag, 64}, + {"convT16", funcTag, 66}, + {"convT32", funcTag, 68}, + {"convT64", funcTag, 69}, + {"convTstring", funcTag, 70}, + {"convTslice", funcTag, 73}, + {"assertE2I", funcTag, 74}, + {"assertE2I2", funcTag, 74}, + {"panicdottypeE", funcTag, 75}, + {"panicdottypeI", funcTag, 75}, + {"panicnildottype", funcTag, 76}, + {"typeAssert", funcTag, 74}, + {"interfaceSwitch", funcTag, 77}, + {"ifaceeq", funcTag, 79}, + {"efaceeq", funcTag, 79}, + {"panicrangestate", funcTag, 80}, {"deferrangefunc", funcTag, 12}, - {"rand", funcTag, 77}, - {"rand32", funcTag, 78}, - {"makemap64", funcTag, 80}, - {"makemap", funcTag, 81}, - {"makemap_small", funcTag, 82}, - {"mapaccess1", funcTag, 83}, - {"mapaccess1_fast32", funcTag, 84}, - {"mapaccess1_fast64", funcTag, 85}, - {"mapaccess1_faststr", funcTag, 86}, - {"mapaccess1_fat", funcTag, 87}, - {"mapaccess2", funcTag, 88}, - {"mapaccess2_fast32", funcTag, 89}, - {"mapaccess2_fast64", funcTag, 90}, - {"mapaccess2_faststr", funcTag, 91}, - {"mapaccess2_fat", funcTag, 92}, - {"mapassign", funcTag, 83}, - {"mapassign_fast32", funcTag, 84}, - {"mapassign_fast32ptr", funcTag, 93}, - {"mapassign_fast64", funcTag, 85}, - {"mapassign_fast64ptr", funcTag, 93}, - {"mapassign_faststr", funcTag, 86}, - {"mapIterStart", funcTag, 94}, - {"mapdelete", funcTag, 94}, - {"mapdelete_fast32", funcTag, 95}, - {"mapdelete_fast64", funcTag, 96}, - {"mapdelete_faststr", funcTag, 97}, - {"mapIterNext", funcTag, 98}, - {"mapclear", funcTag, 99}, - {"makechan64", funcTag, 101}, - {"makechan", funcTag, 102}, - {"chanrecv1", funcTag, 104}, - {"chanrecv2", funcTag, 105}, - {"chansend1", funcTag, 107}, - {"closechan", funcTag, 108}, - {"chanlen", funcTag, 109}, - {"chancap", funcTag, 109}, - {"writeBarrier", varTag, 111}, - {"typedmemmove", funcTag, 112}, - {"typedmemclr", funcTag, 113}, - {"typedslicecopy", funcTag, 114}, - {"selectnbsend", 
funcTag, 115}, - {"selectnbrecv", funcTag, 116}, - {"selectsetpc", funcTag, 117}, - {"selectgo", funcTag, 118}, + {"rand", funcTag, 81}, + {"rand32", funcTag, 82}, + {"makemap64", funcTag, 84}, + {"makemap", funcTag, 85}, + {"makemap_small", funcTag, 86}, + {"mapaccess1", funcTag, 87}, + {"mapaccess1_fast32", funcTag, 88}, + {"mapaccess1_fast64", funcTag, 89}, + {"mapaccess1_faststr", funcTag, 90}, + {"mapaccess1_fat", funcTag, 91}, + {"mapaccess2", funcTag, 92}, + {"mapaccess2_fast32", funcTag, 93}, + {"mapaccess2_fast64", funcTag, 94}, + {"mapaccess2_faststr", funcTag, 95}, + {"mapaccess2_fat", funcTag, 96}, + {"mapassign", funcTag, 87}, + {"mapassign_fast32", funcTag, 88}, + {"mapassign_fast32ptr", funcTag, 97}, + {"mapassign_fast64", funcTag, 89}, + {"mapassign_fast64ptr", funcTag, 97}, + {"mapassign_faststr", funcTag, 90}, + {"mapIterStart", funcTag, 98}, + {"mapdelete", funcTag, 98}, + {"mapdelete_fast32", funcTag, 99}, + {"mapdelete_fast64", funcTag, 100}, + {"mapdelete_faststr", funcTag, 101}, + {"mapIterNext", funcTag, 102}, + {"mapclear", funcTag, 103}, + {"makechan64", funcTag, 105}, + {"makechan", funcTag, 106}, + {"chanrecv1", funcTag, 108}, + {"chanrecv2", funcTag, 109}, + {"chansend1", funcTag, 111}, + {"closechan", funcTag, 112}, + {"chanlen", funcTag, 113}, + {"chancap", funcTag, 113}, + {"writeBarrier", varTag, 115}, + {"typedmemmove", funcTag, 116}, + {"typedmemclr", funcTag, 117}, + {"typedslicecopy", funcTag, 118}, + {"selectnbsend", funcTag, 119}, + {"selectnbrecv", funcTag, 120}, + {"selectsetpc", funcTag, 121}, + {"selectgo", funcTag, 122}, {"block", funcTag, 9}, - {"makeslice", funcTag, 119}, - {"makeslice64", funcTag, 120}, - {"makeslicecopy", funcTag, 121}, - {"growslice", funcTag, 123}, - {"unsafeslicecheckptr", funcTag, 124}, + {"makeslice", funcTag, 123}, + {"makeslice64", funcTag, 124}, + {"makeslicecopy", funcTag, 125}, + {"growslice", funcTag, 127}, + {"unsafeslicecheckptr", funcTag, 128}, {"panicunsafeslicelen", funcTag, 9}, {"panicunsafeslicenilptr", funcTag, 9}, - {"unsafestringcheckptr", funcTag, 125}, + {"unsafestringcheckptr", funcTag, 129}, {"panicunsafestringlen", funcTag, 9}, {"panicunsafestringnilptr", funcTag, 9}, - {"memmove", funcTag, 126}, - {"memclrNoHeapPointers", funcTag, 127}, - {"memclrHasPointers", funcTag, 127}, - {"memequal", funcTag, 128}, - {"memequal0", funcTag, 129}, - {"memequal8", funcTag, 129}, - {"memequal16", funcTag, 129}, - {"memequal32", funcTag, 129}, - {"memequal64", funcTag, 129}, - {"memequal128", funcTag, 129}, - {"f32equal", funcTag, 130}, - {"f64equal", funcTag, 130}, - {"c64equal", funcTag, 130}, - {"c128equal", funcTag, 130}, - {"strequal", funcTag, 130}, - {"interequal", funcTag, 130}, - {"nilinterequal", funcTag, 130}, - {"memhash", funcTag, 131}, - {"memhash0", funcTag, 132}, - {"memhash8", funcTag, 132}, - {"memhash16", funcTag, 132}, - {"memhash32", funcTag, 132}, - {"memhash64", funcTag, 132}, - {"memhash128", funcTag, 132}, - {"f32hash", funcTag, 133}, - {"f64hash", funcTag, 133}, - {"c64hash", funcTag, 133}, - {"c128hash", funcTag, 133}, - {"strhash", funcTag, 133}, - {"interhash", funcTag, 133}, - {"nilinterhash", funcTag, 133}, - {"int64div", funcTag, 134}, - {"uint64div", funcTag, 135}, - {"int64mod", funcTag, 134}, - {"uint64mod", funcTag, 135}, - {"float64toint64", funcTag, 136}, - {"float64touint64", funcTag, 137}, - {"float64touint32", funcTag, 138}, - {"int64tofloat64", funcTag, 139}, - {"int64tofloat32", funcTag, 141}, - {"uint64tofloat64", funcTag, 142}, - {"uint64tofloat32", funcTag, 143}, - 
{"uint32tofloat64", funcTag, 144}, - {"complex128div", funcTag, 145}, - {"racefuncenter", funcTag, 29}, + {"memmove", funcTag, 130}, + {"memclrNoHeapPointers", funcTag, 131}, + {"memclrHasPointers", funcTag, 131}, + {"memequal", funcTag, 132}, + {"memequal0", funcTag, 133}, + {"memequal8", funcTag, 133}, + {"memequal16", funcTag, 133}, + {"memequal32", funcTag, 133}, + {"memequal64", funcTag, 133}, + {"memequal128", funcTag, 133}, + {"f32equal", funcTag, 134}, + {"f64equal", funcTag, 134}, + {"c64equal", funcTag, 134}, + {"c128equal", funcTag, 134}, + {"strequal", funcTag, 134}, + {"interequal", funcTag, 134}, + {"nilinterequal", funcTag, 134}, + {"memhash", funcTag, 135}, + {"memhash0", funcTag, 136}, + {"memhash8", funcTag, 136}, + {"memhash16", funcTag, 136}, + {"memhash32", funcTag, 136}, + {"memhash64", funcTag, 136}, + {"memhash128", funcTag, 136}, + {"f32hash", funcTag, 137}, + {"f64hash", funcTag, 137}, + {"c64hash", funcTag, 137}, + {"c128hash", funcTag, 137}, + {"strhash", funcTag, 137}, + {"interhash", funcTag, 137}, + {"nilinterhash", funcTag, 137}, + {"int64div", funcTag, 138}, + {"uint64div", funcTag, 139}, + {"int64mod", funcTag, 138}, + {"uint64mod", funcTag, 139}, + {"float64toint64", funcTag, 140}, + {"float64touint64", funcTag, 141}, + {"float64touint32", funcTag, 142}, + {"int64tofloat64", funcTag, 143}, + {"int64tofloat32", funcTag, 144}, + {"uint64tofloat64", funcTag, 145}, + {"uint64tofloat32", funcTag, 146}, + {"uint32tofloat64", funcTag, 147}, + {"complex128div", funcTag, 148}, + {"racefuncenter", funcTag, 33}, {"racefuncexit", funcTag, 9}, - {"raceread", funcTag, 29}, - {"racewrite", funcTag, 29}, - {"racereadrange", funcTag, 146}, - {"racewriterange", funcTag, 146}, - {"msanread", funcTag, 146}, - {"msanwrite", funcTag, 146}, - {"msanmove", funcTag, 147}, - {"asanread", funcTag, 146}, - {"asanwrite", funcTag, 146}, - {"checkptrAlignment", funcTag, 148}, - {"checkptrArithmetic", funcTag, 150}, - {"libfuzzerTraceCmp1", funcTag, 151}, - {"libfuzzerTraceCmp2", funcTag, 152}, - {"libfuzzerTraceCmp4", funcTag, 153}, - {"libfuzzerTraceCmp8", funcTag, 154}, - {"libfuzzerTraceConstCmp1", funcTag, 151}, - {"libfuzzerTraceConstCmp2", funcTag, 152}, - {"libfuzzerTraceConstCmp4", funcTag, 153}, - {"libfuzzerTraceConstCmp8", funcTag, 154}, - {"libfuzzerHookStrCmp", funcTag, 155}, - {"libfuzzerHookEqualFold", funcTag, 155}, - {"addCovMeta", funcTag, 157}, + {"raceread", funcTag, 33}, + {"racewrite", funcTag, 33}, + {"racereadrange", funcTag, 149}, + {"racewriterange", funcTag, 149}, + {"msanread", funcTag, 149}, + {"msanwrite", funcTag, 149}, + {"msanmove", funcTag, 150}, + {"asanread", funcTag, 149}, + {"asanwrite", funcTag, 149}, + {"checkptrAlignment", funcTag, 151}, + {"checkptrArithmetic", funcTag, 153}, + {"libfuzzerTraceCmp1", funcTag, 154}, + {"libfuzzerTraceCmp2", funcTag, 155}, + {"libfuzzerTraceCmp4", funcTag, 156}, + {"libfuzzerTraceCmp8", funcTag, 157}, + {"libfuzzerTraceConstCmp1", funcTag, 154}, + {"libfuzzerTraceConstCmp2", funcTag, 155}, + {"libfuzzerTraceConstCmp4", funcTag, 156}, + {"libfuzzerTraceConstCmp8", funcTag, 157}, + {"libfuzzerHookStrCmp", funcTag, 158}, + {"libfuzzerHookEqualFold", funcTag, 158}, + {"addCovMeta", funcTag, 160}, {"x86HasAVX", varTag, 6}, {"x86HasFMA", varTag, 6}, {"x86HasPOPCNT", varTag, 6}, @@ -242,11 +244,11 @@ var runtimeDecls = [...]struct { {"loong64HasLAM_BH", varTag, 6}, {"loong64HasLSX", varTag, 6}, {"riscv64HasZbb", varTag, 6}, - {"asanregisterglobals", funcTag, 127}, + {"asanregisterglobals", funcTag, 131}, } func 
runtimeTypes() []*types.Type { - var typs [158]*types.Type + var typs [161]*types.Type typs[0] = types.ByteType typs[1] = types.NewPtr(typs[0]) typs[2] = types.Types[types.TANY] @@ -267,144 +269,147 @@ func runtimeTypes() []*types.Type { typs[17] = newSig(params(typs[6]), nil) typs[18] = types.Types[types.TFLOAT64] typs[19] = newSig(params(typs[18]), nil) - typs[20] = types.Types[types.TINT64] + typs[20] = types.Types[types.TFLOAT32] typs[21] = newSig(params(typs[20]), nil) - typs[22] = types.Types[types.TUINT64] + typs[22] = types.Types[types.TINT64] typs[23] = newSig(params(typs[22]), nil) - typs[24] = types.Types[types.TCOMPLEX128] + typs[24] = types.Types[types.TUINT64] typs[25] = newSig(params(typs[24]), nil) - typs[26] = types.Types[types.TSTRING] + typs[26] = types.Types[types.TCOMPLEX128] typs[27] = newSig(params(typs[26]), nil) - typs[28] = newSig(params(typs[2]), nil) - typs[29] = newSig(params(typs[5]), nil) - typs[30] = types.NewArray(typs[0], 32) - typs[31] = types.NewPtr(typs[30]) - typs[32] = newSig(params(typs[31], typs[26], typs[26]), params(typs[26])) - typs[33] = newSig(params(typs[31], typs[26], typs[26], typs[26]), params(typs[26])) - typs[34] = newSig(params(typs[31], typs[26], typs[26], typs[26], typs[26]), params(typs[26])) - typs[35] = newSig(params(typs[31], typs[26], typs[26], typs[26], typs[26], typs[26]), params(typs[26])) - typs[36] = types.NewSlice(typs[26]) - typs[37] = newSig(params(typs[31], typs[36]), params(typs[26])) - typs[38] = types.NewSlice(typs[0]) - typs[39] = newSig(params(typs[31], typs[26], typs[26]), params(typs[38])) - typs[40] = newSig(params(typs[31], typs[26], typs[26], typs[26]), params(typs[38])) - typs[41] = newSig(params(typs[31], typs[26], typs[26], typs[26], typs[26]), params(typs[38])) - typs[42] = newSig(params(typs[31], typs[26], typs[26], typs[26], typs[26], typs[26]), params(typs[38])) - typs[43] = newSig(params(typs[31], typs[36]), params(typs[38])) - typs[44] = newSig(params(typs[26], typs[26]), params(typs[13])) - typs[45] = types.NewArray(typs[0], 4) - typs[46] = types.NewPtr(typs[45]) - typs[47] = newSig(params(typs[46], typs[20]), params(typs[26])) - typs[48] = newSig(params(typs[31], typs[1], typs[13]), params(typs[26])) - typs[49] = newSig(params(typs[1], typs[13]), params(typs[26])) - typs[50] = types.RuneType - typs[51] = types.NewSlice(typs[50]) - typs[52] = newSig(params(typs[31], typs[51]), params(typs[26])) - typs[53] = newSig(params(typs[31], typs[26]), params(typs[38])) - typs[54] = types.NewArray(typs[50], 32) - typs[55] = types.NewPtr(typs[54]) - typs[56] = newSig(params(typs[55], typs[26]), params(typs[51])) - typs[57] = newSig(params(typs[3], typs[13], typs[3], typs[13], typs[5]), params(typs[13])) - typs[58] = newSig(params(typs[26], typs[13]), params(typs[50], typs[13])) - typs[59] = newSig(params(typs[26]), params(typs[13])) - typs[60] = newSig(params(typs[1], typs[3]), params(typs[7])) - typs[61] = types.Types[types.TUINT16] - typs[62] = newSig(params(typs[61]), params(typs[7])) - typs[63] = types.Types[types.TUINT32] - typs[64] = newSig(params(typs[63]), params(typs[7])) - typs[65] = newSig(params(typs[22]), params(typs[7])) - typs[66] = newSig(params(typs[26]), params(typs[7])) - typs[67] = types.Types[types.TUINT8] - typs[68] = types.NewSlice(typs[67]) - typs[69] = newSig(params(typs[68]), params(typs[7])) - typs[70] = newSig(params(typs[1], typs[1]), params(typs[1])) - typs[71] = newSig(params(typs[1], typs[1], typs[1]), nil) - typs[72] = newSig(params(typs[1]), nil) - typs[73] = 
newSig(params(typs[1], typs[1]), params(typs[13], typs[1])) - typs[74] = types.NewPtr(typs[5]) - typs[75] = newSig(params(typs[74], typs[7], typs[7]), params(typs[6])) - typs[76] = newSig(params(typs[13]), nil) - typs[77] = newSig(nil, params(typs[22])) - typs[78] = newSig(nil, params(typs[63])) - typs[79] = types.NewMap(typs[2], typs[2]) - typs[80] = newSig(params(typs[1], typs[20], typs[3]), params(typs[79])) - typs[81] = newSig(params(typs[1], typs[13], typs[3]), params(typs[79])) - typs[82] = newSig(nil, params(typs[79])) - typs[83] = newSig(params(typs[1], typs[79], typs[3]), params(typs[3])) - typs[84] = newSig(params(typs[1], typs[79], typs[63]), params(typs[3])) - typs[85] = newSig(params(typs[1], typs[79], typs[22]), params(typs[3])) - typs[86] = newSig(params(typs[1], typs[79], typs[26]), params(typs[3])) - typs[87] = newSig(params(typs[1], typs[79], typs[3], typs[1]), params(typs[3])) - typs[88] = newSig(params(typs[1], typs[79], typs[3]), params(typs[3], typs[6])) - typs[89] = newSig(params(typs[1], typs[79], typs[63]), params(typs[3], typs[6])) - typs[90] = newSig(params(typs[1], typs[79], typs[22]), params(typs[3], typs[6])) - typs[91] = newSig(params(typs[1], typs[79], typs[26]), params(typs[3], typs[6])) - typs[92] = newSig(params(typs[1], typs[79], typs[3], typs[1]), params(typs[3], typs[6])) - typs[93] = newSig(params(typs[1], typs[79], typs[7]), params(typs[3])) - typs[94] = newSig(params(typs[1], typs[79], typs[3]), nil) - typs[95] = newSig(params(typs[1], typs[79], typs[63]), nil) - typs[96] = newSig(params(typs[1], typs[79], typs[22]), nil) - typs[97] = newSig(params(typs[1], typs[79], typs[26]), nil) - typs[98] = newSig(params(typs[3]), nil) - typs[99] = newSig(params(typs[1], typs[79]), nil) - typs[100] = types.NewChan(typs[2], types.Cboth) - typs[101] = newSig(params(typs[1], typs[20]), params(typs[100])) - typs[102] = newSig(params(typs[1], typs[13]), params(typs[100])) - typs[103] = types.NewChan(typs[2], types.Crecv) - typs[104] = newSig(params(typs[103], typs[3]), nil) - typs[105] = newSig(params(typs[103], typs[3]), params(typs[6])) - typs[106] = types.NewChan(typs[2], types.Csend) - typs[107] = newSig(params(typs[106], typs[3]), nil) - typs[108] = newSig(params(typs[106]), nil) - typs[109] = newSig(params(typs[2]), params(typs[13])) - typs[110] = types.NewArray(typs[0], 3) - typs[111] = types.NewStruct([]*types.Field{types.NewField(src.NoXPos, Lookup("enabled"), typs[6]), types.NewField(src.NoXPos, Lookup("pad"), typs[110]), types.NewField(src.NoXPos, Lookup("cgo"), typs[6]), types.NewField(src.NoXPos, Lookup("alignme"), typs[22])}) - typs[112] = newSig(params(typs[1], typs[3], typs[3]), nil) - typs[113] = newSig(params(typs[1], typs[3]), nil) - typs[114] = newSig(params(typs[1], typs[3], typs[13], typs[3], typs[13]), params(typs[13])) - typs[115] = newSig(params(typs[106], typs[3]), params(typs[6])) - typs[116] = newSig(params(typs[3], typs[103]), params(typs[6], typs[6])) - typs[117] = newSig(params(typs[74]), nil) - typs[118] = newSig(params(typs[1], typs[1], typs[74], typs[13], typs[13], typs[6]), params(typs[13], typs[6])) - typs[119] = newSig(params(typs[1], typs[13], typs[13]), params(typs[7])) - typs[120] = newSig(params(typs[1], typs[20], typs[20]), params(typs[7])) - typs[121] = newSig(params(typs[1], typs[13], typs[13], typs[7]), params(typs[7])) - typs[122] = types.NewSlice(typs[2]) - typs[123] = newSig(params(typs[3], typs[13], typs[13], typs[13], typs[1]), params(typs[122])) - typs[124] = newSig(params(typs[1], typs[7], typs[20]), nil) - 
typs[125] = newSig(params(typs[7], typs[20]), nil) - typs[126] = newSig(params(typs[3], typs[3], typs[5]), nil) - typs[127] = newSig(params(typs[7], typs[5]), nil) - typs[128] = newSig(params(typs[3], typs[3], typs[5]), params(typs[6])) - typs[129] = newSig(params(typs[3], typs[3]), params(typs[6])) - typs[130] = newSig(params(typs[7], typs[7]), params(typs[6])) - typs[131] = newSig(params(typs[3], typs[5], typs[5]), params(typs[5])) - typs[132] = newSig(params(typs[7], typs[5]), params(typs[5])) - typs[133] = newSig(params(typs[3], typs[5]), params(typs[5])) - typs[134] = newSig(params(typs[20], typs[20]), params(typs[20])) - typs[135] = newSig(params(typs[22], typs[22]), params(typs[22])) - typs[136] = newSig(params(typs[18]), params(typs[20])) - typs[137] = newSig(params(typs[18]), params(typs[22])) - typs[138] = newSig(params(typs[18]), params(typs[63])) - typs[139] = newSig(params(typs[20]), params(typs[18])) - typs[140] = types.Types[types.TFLOAT32] - typs[141] = newSig(params(typs[20]), params(typs[140])) - typs[142] = newSig(params(typs[22]), params(typs[18])) - typs[143] = newSig(params(typs[22]), params(typs[140])) - typs[144] = newSig(params(typs[63]), params(typs[18])) - typs[145] = newSig(params(typs[24], typs[24]), params(typs[24])) - typs[146] = newSig(params(typs[5], typs[5]), nil) - typs[147] = newSig(params(typs[5], typs[5], typs[5]), nil) - typs[148] = newSig(params(typs[7], typs[1], typs[5]), nil) - typs[149] = types.NewSlice(typs[7]) - typs[150] = newSig(params(typs[7], typs[149]), nil) - typs[151] = newSig(params(typs[67], typs[67], typs[15]), nil) - typs[152] = newSig(params(typs[61], typs[61], typs[15]), nil) - typs[153] = newSig(params(typs[63], typs[63], typs[15]), nil) - typs[154] = newSig(params(typs[22], typs[22], typs[15]), nil) - typs[155] = newSig(params(typs[26], typs[26], typs[15]), nil) - typs[156] = types.NewArray(typs[0], 16) - typs[157] = newSig(params(typs[7], typs[63], typs[156], typs[26], typs[13], typs[67], typs[67]), params(typs[63])) + typs[28] = types.Types[types.TCOMPLEX64] + typs[29] = newSig(params(typs[28]), nil) + typs[30] = types.Types[types.TSTRING] + typs[31] = newSig(params(typs[30]), nil) + typs[32] = newSig(params(typs[2]), nil) + typs[33] = newSig(params(typs[5]), nil) + typs[34] = types.NewArray(typs[0], 32) + typs[35] = types.NewPtr(typs[34]) + typs[36] = newSig(params(typs[35], typs[30], typs[30]), params(typs[30])) + typs[37] = newSig(params(typs[35], typs[30], typs[30], typs[30]), params(typs[30])) + typs[38] = newSig(params(typs[35], typs[30], typs[30], typs[30], typs[30]), params(typs[30])) + typs[39] = newSig(params(typs[35], typs[30], typs[30], typs[30], typs[30], typs[30]), params(typs[30])) + typs[40] = types.NewSlice(typs[30]) + typs[41] = newSig(params(typs[35], typs[40]), params(typs[30])) + typs[42] = types.NewSlice(typs[0]) + typs[43] = newSig(params(typs[35], typs[30], typs[30]), params(typs[42])) + typs[44] = newSig(params(typs[35], typs[30], typs[30], typs[30]), params(typs[42])) + typs[45] = newSig(params(typs[35], typs[30], typs[30], typs[30], typs[30]), params(typs[42])) + typs[46] = newSig(params(typs[35], typs[30], typs[30], typs[30], typs[30], typs[30]), params(typs[42])) + typs[47] = newSig(params(typs[35], typs[40]), params(typs[42])) + typs[48] = newSig(params(typs[30], typs[30]), params(typs[13])) + typs[49] = types.NewArray(typs[0], 4) + typs[50] = types.NewPtr(typs[49]) + typs[51] = newSig(params(typs[50], typs[22]), params(typs[30])) + typs[52] = newSig(params(typs[35], typs[1], typs[13]), 
params(typs[30])) + typs[53] = newSig(params(typs[1], typs[13]), params(typs[30])) + typs[54] = types.RuneType + typs[55] = types.NewSlice(typs[54]) + typs[56] = newSig(params(typs[35], typs[55]), params(typs[30])) + typs[57] = newSig(params(typs[35], typs[30]), params(typs[42])) + typs[58] = types.NewArray(typs[54], 32) + typs[59] = types.NewPtr(typs[58]) + typs[60] = newSig(params(typs[59], typs[30]), params(typs[55])) + typs[61] = newSig(params(typs[3], typs[13], typs[3], typs[13], typs[5]), params(typs[13])) + typs[62] = newSig(params(typs[30], typs[13]), params(typs[54], typs[13])) + typs[63] = newSig(params(typs[30]), params(typs[13])) + typs[64] = newSig(params(typs[1], typs[3]), params(typs[7])) + typs[65] = types.Types[types.TUINT16] + typs[66] = newSig(params(typs[65]), params(typs[7])) + typs[67] = types.Types[types.TUINT32] + typs[68] = newSig(params(typs[67]), params(typs[7])) + typs[69] = newSig(params(typs[24]), params(typs[7])) + typs[70] = newSig(params(typs[30]), params(typs[7])) + typs[71] = types.Types[types.TUINT8] + typs[72] = types.NewSlice(typs[71]) + typs[73] = newSig(params(typs[72]), params(typs[7])) + typs[74] = newSig(params(typs[1], typs[1]), params(typs[1])) + typs[75] = newSig(params(typs[1], typs[1], typs[1]), nil) + typs[76] = newSig(params(typs[1]), nil) + typs[77] = newSig(params(typs[1], typs[1]), params(typs[13], typs[1])) + typs[78] = types.NewPtr(typs[5]) + typs[79] = newSig(params(typs[78], typs[7], typs[7]), params(typs[6])) + typs[80] = newSig(params(typs[13]), nil) + typs[81] = newSig(nil, params(typs[24])) + typs[82] = newSig(nil, params(typs[67])) + typs[83] = types.NewMap(typs[2], typs[2]) + typs[84] = newSig(params(typs[1], typs[22], typs[3]), params(typs[83])) + typs[85] = newSig(params(typs[1], typs[13], typs[3]), params(typs[83])) + typs[86] = newSig(nil, params(typs[83])) + typs[87] = newSig(params(typs[1], typs[83], typs[3]), params(typs[3])) + typs[88] = newSig(params(typs[1], typs[83], typs[67]), params(typs[3])) + typs[89] = newSig(params(typs[1], typs[83], typs[24]), params(typs[3])) + typs[90] = newSig(params(typs[1], typs[83], typs[30]), params(typs[3])) + typs[91] = newSig(params(typs[1], typs[83], typs[3], typs[1]), params(typs[3])) + typs[92] = newSig(params(typs[1], typs[83], typs[3]), params(typs[3], typs[6])) + typs[93] = newSig(params(typs[1], typs[83], typs[67]), params(typs[3], typs[6])) + typs[94] = newSig(params(typs[1], typs[83], typs[24]), params(typs[3], typs[6])) + typs[95] = newSig(params(typs[1], typs[83], typs[30]), params(typs[3], typs[6])) + typs[96] = newSig(params(typs[1], typs[83], typs[3], typs[1]), params(typs[3], typs[6])) + typs[97] = newSig(params(typs[1], typs[83], typs[7]), params(typs[3])) + typs[98] = newSig(params(typs[1], typs[83], typs[3]), nil) + typs[99] = newSig(params(typs[1], typs[83], typs[67]), nil) + typs[100] = newSig(params(typs[1], typs[83], typs[24]), nil) + typs[101] = newSig(params(typs[1], typs[83], typs[30]), nil) + typs[102] = newSig(params(typs[3]), nil) + typs[103] = newSig(params(typs[1], typs[83]), nil) + typs[104] = types.NewChan(typs[2], types.Cboth) + typs[105] = newSig(params(typs[1], typs[22]), params(typs[104])) + typs[106] = newSig(params(typs[1], typs[13]), params(typs[104])) + typs[107] = types.NewChan(typs[2], types.Crecv) + typs[108] = newSig(params(typs[107], typs[3]), nil) + typs[109] = newSig(params(typs[107], typs[3]), params(typs[6])) + typs[110] = types.NewChan(typs[2], types.Csend) + typs[111] = newSig(params(typs[110], typs[3]), nil) + typs[112] = 
newSig(params(typs[110]), nil) + typs[113] = newSig(params(typs[2]), params(typs[13])) + typs[114] = types.NewArray(typs[0], 3) + typs[115] = types.NewStruct([]*types.Field{types.NewField(src.NoXPos, Lookup("enabled"), typs[6]), types.NewField(src.NoXPos, Lookup("pad"), typs[114]), types.NewField(src.NoXPos, Lookup("cgo"), typs[6]), types.NewField(src.NoXPos, Lookup("alignme"), typs[24])}) + typs[116] = newSig(params(typs[1], typs[3], typs[3]), nil) + typs[117] = newSig(params(typs[1], typs[3]), nil) + typs[118] = newSig(params(typs[1], typs[3], typs[13], typs[3], typs[13]), params(typs[13])) + typs[119] = newSig(params(typs[110], typs[3]), params(typs[6])) + typs[120] = newSig(params(typs[3], typs[107]), params(typs[6], typs[6])) + typs[121] = newSig(params(typs[78]), nil) + typs[122] = newSig(params(typs[1], typs[1], typs[78], typs[13], typs[13], typs[6]), params(typs[13], typs[6])) + typs[123] = newSig(params(typs[1], typs[13], typs[13]), params(typs[7])) + typs[124] = newSig(params(typs[1], typs[22], typs[22]), params(typs[7])) + typs[125] = newSig(params(typs[1], typs[13], typs[13], typs[7]), params(typs[7])) + typs[126] = types.NewSlice(typs[2]) + typs[127] = newSig(params(typs[3], typs[13], typs[13], typs[13], typs[1]), params(typs[126])) + typs[128] = newSig(params(typs[1], typs[7], typs[22]), nil) + typs[129] = newSig(params(typs[7], typs[22]), nil) + typs[130] = newSig(params(typs[3], typs[3], typs[5]), nil) + typs[131] = newSig(params(typs[7], typs[5]), nil) + typs[132] = newSig(params(typs[3], typs[3], typs[5]), params(typs[6])) + typs[133] = newSig(params(typs[3], typs[3]), params(typs[6])) + typs[134] = newSig(params(typs[7], typs[7]), params(typs[6])) + typs[135] = newSig(params(typs[3], typs[5], typs[5]), params(typs[5])) + typs[136] = newSig(params(typs[7], typs[5]), params(typs[5])) + typs[137] = newSig(params(typs[3], typs[5]), params(typs[5])) + typs[138] = newSig(params(typs[22], typs[22]), params(typs[22])) + typs[139] = newSig(params(typs[24], typs[24]), params(typs[24])) + typs[140] = newSig(params(typs[18]), params(typs[22])) + typs[141] = newSig(params(typs[18]), params(typs[24])) + typs[142] = newSig(params(typs[18]), params(typs[67])) + typs[143] = newSig(params(typs[22]), params(typs[18])) + typs[144] = newSig(params(typs[22]), params(typs[20])) + typs[145] = newSig(params(typs[24]), params(typs[18])) + typs[146] = newSig(params(typs[24]), params(typs[20])) + typs[147] = newSig(params(typs[67]), params(typs[18])) + typs[148] = newSig(params(typs[26], typs[26]), params(typs[26])) + typs[149] = newSig(params(typs[5], typs[5]), nil) + typs[150] = newSig(params(typs[5], typs[5], typs[5]), nil) + typs[151] = newSig(params(typs[7], typs[1], typs[5]), nil) + typs[152] = types.NewSlice(typs[7]) + typs[153] = newSig(params(typs[7], typs[152]), nil) + typs[154] = newSig(params(typs[71], typs[71], typs[15]), nil) + typs[155] = newSig(params(typs[65], typs[65], typs[15]), nil) + typs[156] = newSig(params(typs[67], typs[67], typs[15]), nil) + typs[157] = newSig(params(typs[24], typs[24], typs[15]), nil) + typs[158] = newSig(params(typs[30], typs[30], typs[15]), nil) + typs[159] = types.NewArray(typs[0], 16) + typs[160] = newSig(params(typs[7], typs[67], typs[159], typs[30], typs[13], typs[71], typs[71]), params(typs[67])) return typs[:] } diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index c97220e6c23..d7aad9c6b96 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ 
b/src/cmd/compile/internal/typecheck/typecheck.go @@ -713,7 +713,7 @@ func implicitstar(n ir.Node) ir.Node { return Expr(star) } -func needOneArg(n *ir.CallExpr, f string, args ...interface{}) (ir.Node, bool) { +func needOneArg(n *ir.CallExpr, f string, args ...any) (ir.Node, bool) { if len(n.Args) == 0 { p := fmt.Sprintf(f, args...) base.Errorf("missing argument to %s: %v", p, n) diff --git a/src/cmd/compile/internal/types/fmt.go b/src/cmd/compile/internal/types/fmt.go index 67e2e99f02f..848720ee896 100644 --- a/src/cmd/compile/internal/types/fmt.go +++ b/src/cmd/compile/internal/types/fmt.go @@ -183,7 +183,7 @@ var BasicTypeNames = []string{ } var fmtBufferPool = sync.Pool{ - New: func() interface{} { + New: func() any { return new(bytes.Buffer) }, } diff --git a/src/cmd/compile/internal/types/sizeof_test.go b/src/cmd/compile/internal/types/sizeof_test.go index ba033ec499b..1b80659c8ed 100644 --- a/src/cmd/compile/internal/types/sizeof_test.go +++ b/src/cmd/compile/internal/types/sizeof_test.go @@ -16,9 +16,9 @@ func TestSizeof(t *testing.T) { const _64bit = unsafe.Sizeof(uintptr(0)) == 8 var tests = []struct { - val interface{} // type as a value - _32bit uintptr // size on 32bit platforms - _64bit uintptr // size on 64bit platforms + val any // type as a value + _32bit uintptr // size on 32bit platforms + _64bit uintptr // size on 64bit platforms }{ {Sym{}, 32, 64}, {Type{}, 60, 96}, diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index fc2c0435bdf..e8aca900817 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -174,7 +174,7 @@ type Type struct { // TARRAY: *Array // TSLICE: Slice // TSSA: string - extra interface{} + extra any // width is the width of this Type in bytes. width int64 // valid if Align > 0 @@ -329,7 +329,7 @@ func (t *Type) funcType() *Func { return t.extra.(*Func) } -// StructType contains Type fields specific to struct types. +// Struct contains Type fields specific to struct types. type Struct struct { fields fields diff --git a/src/cmd/compile/internal/types2/alias.go b/src/cmd/compile/internal/types2/alias.go index 90dda18cc88..d306600ebd2 100644 --- a/src/cmd/compile/internal/types2/alias.go +++ b/src/cmd/compile/internal/types2/alias.go @@ -113,7 +113,6 @@ func unalias(a0 *Alias) Type { for a := a0; a != nil; a, _ = t.(*Alias) { t = a.fromRHS } - // It's fine to memoize nil types since it's the zero value for actual. // It accomplishes nothing. 
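The interface{} -> any rewrites above are purely mechanical: any has been an alias for interface{} since Go 1.18, so the two spellings denote identical types and can be mixed freely. A minimal standalone illustration (the describe helper is invented for this sketch, not part of the CL):

	package main

	import "fmt"

	// describe accepts values of any type; any and interface{} are interchangeable.
	func describe(args ...any) string {
		return fmt.Sprint(args...)
	}

	func main() {
		var x interface{} = 42
		var y any = x // identical types: no conversion involved
		fmt.Println(describe("x:", x, "y:", y))
	}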
a0.actual = t diff --git a/src/cmd/compile/internal/types2/api_test.go b/src/cmd/compile/internal/types2/api_test.go index 0d3c8b8e3e5..4b7012e6c45 100644 --- a/src/cmd/compile/internal/types2/api_test.go +++ b/src/cmd/compile/internal/types2/api_test.go @@ -2468,8 +2468,8 @@ func TestInstantiateErrors(t *testing.T) { t.Fatalf("Instantiate(%v, %v) returned nil error, want non-nil", T, test.targs) } - var argErr *ArgumentError - if !errors.As(err, &argErr) { + argErr, ok := errors.AsType[*ArgumentError](err) + if !ok { t.Fatalf("Instantiate(%v, %v): error is not an *ArgumentError", T, test.targs) } @@ -2484,8 +2484,8 @@ func TestArgumentErrorUnwrapping(t *testing.T) { Index: 1, Err: Error{Msg: "test"}, } - var e Error - if !errors.As(err, &e) { + e, ok := errors.AsType[Error](err) + if !ok { t.Fatalf("error %v does not wrap types.Error", err) } if e.Msg != "test" { diff --git a/src/cmd/compile/internal/types2/assignments.go b/src/cmd/compile/internal/types2/assignments.go index 8af5f4037a5..87f5c8beeaf 100644 --- a/src/cmd/compile/internal/types2/assignments.go +++ b/src/cmd/compile/internal/types2/assignments.go @@ -91,7 +91,7 @@ func (check *Checker) assignment(x *operand, T Type, context string) { // x.typ is typed // A generic (non-instantiated) function value cannot be assigned to a variable. - if sig, _ := under(x.typ).(*Signature); sig != nil && sig.TypeParams().Len() > 0 { + if sig, _ := x.typ.Underlying().(*Signature); sig != nil && sig.TypeParams().Len() > 0 { check.errorf(x, WrongTypeArgCount, "cannot use generic function %s without instantiation in %s", x, context) x.mode = invalid return @@ -261,7 +261,7 @@ func (check *Checker) assignVar(lhs, rhs syntax.Expr, x *operand, context string var target *target // avoid calling ExprString if not needed if T != nil { - if _, ok := under(T).(*Signature); ok { + if _, ok := T.Underlying().(*Signature); ok { target = newTarget(T, ExprString(lhs)) } } diff --git a/src/cmd/compile/internal/types2/builtins.go b/src/cmd/compile/internal/types2/builtins.go index df207a2746d..549d94615bc 100644 --- a/src/cmd/compile/internal/types2/builtins.go +++ b/src/cmd/compile/internal/types2/builtins.go @@ -91,6 +91,17 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( // to type []byte with a second argument of string type followed by ... . // This form appends the bytes of the string." + // In either case, the first argument must be a slice; in particular it + // cannot be the predeclared nil value. Note that nil is not excluded by + // the assignability requirement alone for the special case (go.dev/issue/76220). + // spec: "If S is a type parameter, all types in its type set + // must have the same underlying slice type []E." + E, err := sliceElem(x) + if err != nil { + check.errorf(x, InvalidAppend, "invalid append: %s", err.format(check)) + return + } + // Handle append(bytes, y...) special case, where // the type set of y is {string} or {string, []byte}. var sig *Signature @@ -119,13 +130,6 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( // general case if sig == nil { - // spec: "If S is a type parameter, all types in its type set - // must have the same underlying slice type []E." 
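The api_test.go changes above replace the two-step errors.As pattern with the generic errors.AsType, which returns the typed error and an ok flag directly. A rough sketch of the same rewrite outside the type checker, assuming the recently added errors.AsType[E error](err) (E, bool) form; parseError is a made-up error type for illustration:

	package main

	import (
		"errors"
		"fmt"
	)

	type parseError struct{ msg string }

	func (e *parseError) Error() string { return e.msg }

	func main() {
		err := fmt.Errorf("wrapped: %w", &parseError{msg: "bad input"})

		// Old pattern, as removed above:
		var pe *parseError
		if errors.As(err, &pe) {
			fmt.Println("As:", pe.msg)
		}

		// New pattern, as added above:
		if pe, ok := errors.AsType[*parseError](err); ok {
			fmt.Println("AsType:", pe.msg)
		}
	}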
- E, err := sliceElem(x) - if err != nil { - check.errorf(x, InvalidAppend, "invalid append: %s", err.format(check)) - return - } // check arguments by creating custom signature sig = makeSig(x.typ, x.typ, NewSlice(E)) // []E required for variadic signature sig.variadic = true @@ -144,7 +148,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( // len(x) mode := invalid var val constant.Value - switch t := arrayPtrDeref(under(x.typ)).(type) { + switch t := arrayPtrDeref(x.typ.Underlying()).(type) { case *Basic: if isString(t) && id == _Len { if x.mode == constant_ { @@ -203,7 +207,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( if mode == invalid { // avoid error if underlying type is invalid - if isValid(under(x.typ)) { + if isValid(x.typ.Underlying()) { code := InvalidCap if id == _Len { code = InvalidLen @@ -322,7 +326,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( // (applyTypeFunc never calls f with a type parameter) f := func(typ Type) Type { assert(!isTypeParam(typ)) - if t, _ := under(typ).(*Basic); t != nil { + if t, _ := typ.Underlying().(*Basic); t != nil { switch t.kind { case Float32: return Typ[Complex64] @@ -472,7 +476,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( // (applyTypeFunc never calls f with a type parameter) f := func(typ Type) Type { assert(!isTypeParam(typ)) - if t, _ := under(typ).(*Basic); t != nil { + if t, _ := typ.Underlying().(*Basic); t != nil { switch t.kind { case Complex64: return Typ[Float32] @@ -639,31 +643,31 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( // new(T) or new(expr) // (no argument evaluated yet) arg := argList[0] - check.exprOrType(x, arg, true) - var T Type + check.exprOrType(x, arg, false) + check.exclude(x, 1< want { @@ -205,7 +205,7 @@ func (check *Checker) callExpr(x *operand, call *syntax.CallExpr) exprKind { case 1: check.expr(nil, x, call.ArgList[0]) if x.mode != invalid { - if t, _ := under(T).(*Interface); t != nil && !isTypeParam(T) { + if t, _ := T.Underlying().(*Interface); t != nil && !isTypeParam(T) { if !t.IsMethodSet() { check.errorf(call, MisplacedConstraintIface, "cannot use interface %s in conversion (contains specific type constraints or is comparable)", T) break @@ -812,7 +812,7 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, def *TypeName obj, index, indirect = lookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel, false) if obj == nil { // Don't report another error if the underlying type was invalid (go.dev/issue/49541). - if !isValid(under(x.typ)) { + if !isValid(x.typ.Underlying()) { goto Error } @@ -931,6 +931,7 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, def *TypeName Error: x.mode = invalid + x.typ = Typ[Invalid] x.expr = e } diff --git a/src/cmd/compile/internal/types2/check.go b/src/cmd/compile/internal/types2/check.go index 411a1719ce4..25cda4f73dd 100644 --- a/src/cmd/compile/internal/types2/check.go +++ b/src/cmd/compile/internal/types2/check.go @@ -118,7 +118,7 @@ type action struct { // If debug is set, describef sets a printf-formatted description for action a. // Otherwise, it is a no-op. 
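The builtins.go hunk above changes the checking of new so that its argument may be an expression, not only a type ("new(T) or new(expr)"). Assuming this corresponds to the new(expr) language extension, code like the following would be accepted (it does not compile under earlier Go versions); this is a hedged sketch of the intended behavior, not taken from the CL:

	package main

	import "fmt"

	func main() {
		p := new(int)  // classic form: *int pointing at a zero int
		q := new(42)   // extended form: *int pointing at a variable initialized to 42
		s := new("hi") // untyped constants take their default type, here string
		fmt.Println(*p, *q, *s)
	}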
-func (a *action) describef(pos poser, format string, args ...interface{}) { +func (a *action) describef(pos poser, format string, args ...any) { if debug { a.desc = &actionDesc{pos, format, args} } @@ -129,7 +129,7 @@ func (a *action) describef(pos poser, format string, args ...interface{}) { type actionDesc struct { pos poser format string - args []interface{} + args []any } // A Checker maintains the state of the type checker. @@ -141,9 +141,10 @@ type Checker struct { ctxt *Context // context for de-duplicating instances pkg *Package *Info - nextID uint64 // unique Id for type parameters (first valid Id is 1) - objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info - impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package + nextID uint64 // unique Id for type parameters (first valid Id is 1) + objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info + objList []Object // source-ordered keys of objMap + impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package // see TODO in validtype.go // valids instanceLookup // valid *Named (incl. instantiated) types per the validType check @@ -493,6 +494,12 @@ func (check *Checker) checkFiles(files []*syntax.File) { print("== collectObjects ==") check.collectObjects() + print("== sortObjects ==") + check.sortObjects() + + print("== directCycles ==") + check.directCycles() + print("== packageObjects ==") check.packageObjects() diff --git a/src/cmd/compile/internal/types2/const.go b/src/cmd/compile/internal/types2/const.go index 5e5bc74ba34..b68d72de4d2 100644 --- a/src/cmd/compile/internal/types2/const.go +++ b/src/cmd/compile/internal/types2/const.go @@ -33,7 +33,7 @@ func (check *Checker) overflow(x *operand, opPos syntax.Pos) { // x.typ cannot be a type parameter (type // parameters cannot be constant types). 
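Many hunks in this CL replace the internal under helper with the (memoizing) Underlying method. The invariant both rely on is observable through the public go/types API, which mirrors types2; a small demonstration using only stable, exported calls:

	package main

	import (
		"fmt"
		"go/token"
		"go/types"
	)

	func main() {
		// Build "type MyInt int" programmatically.
		obj := types.NewTypeName(token.NoPos, nil, "MyInt", nil)
		named := types.NewNamed(obj, types.Typ[types.Int], nil)

		// Underlying never returns a *Named, *Alias, or *TypeParam.
		fmt.Println(named)              // MyInt
		fmt.Println(named.Underlying()) // int
	}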
if isTyped(x.typ) { - check.representable(x, under(x.typ).(*Basic)) + check.representable(x, x.typ.Underlying().(*Basic)) return } diff --git a/src/cmd/compile/internal/types2/conversions.go b/src/cmd/compile/internal/types2/conversions.go index 0ad79afe71c..d0920d7ef10 100644 --- a/src/cmd/compile/internal/types2/conversions.go +++ b/src/cmd/compile/internal/types2/conversions.go @@ -18,7 +18,7 @@ func (check *Checker) conversion(x *operand, T Type) { constArg := x.mode == constant_ constConvertibleTo := func(T Type, val *constant.Value) bool { - switch t, _ := under(T).(*Basic); { + switch t, _ := T.Underlying().(*Basic); { case t == nil: // nothing to do case representableConst(x.val, check, t, val): @@ -142,8 +142,8 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool { origT := T V := Unalias(x.typ) T = Unalias(T) - Vu := under(V) - Tu := under(T) + Vu := V.Underlying() + Tu := T.Underlying() Vp, _ := V.(*TypeParam) Tp, _ := T.(*TypeParam) @@ -158,7 +158,7 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool { // and their pointer base types are not type parameters" if V, ok := V.(*Pointer); ok { if T, ok := T.(*Pointer); ok { - if IdenticalIgnoreTags(under(V.base), under(T.base)) && !isTypeParam(V.base) && !isTypeParam(T.base) { + if IdenticalIgnoreTags(V.base.Underlying(), T.base.Underlying()) && !isTypeParam(V.base) && !isTypeParam(T.base) { return true } } @@ -211,7 +211,7 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool { return false } case *Pointer: - if a, _ := under(a.Elem()).(*Array); a != nil { + if a, _ := a.Elem().Underlying().(*Array); a != nil { if Identical(s.Elem(), a.Elem()) { if check == nil || check.allowVersion(go1_17) { return true @@ -292,23 +292,23 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool { } func isUintptr(typ Type) bool { - t, _ := under(typ).(*Basic) + t, _ := typ.Underlying().(*Basic) return t != nil && t.kind == Uintptr } func isUnsafePointer(typ Type) bool { - t, _ := under(typ).(*Basic) + t, _ := typ.Underlying().(*Basic) return t != nil && t.kind == UnsafePointer } func isPointer(typ Type) bool { - _, ok := under(typ).(*Pointer) + _, ok := typ.Underlying().(*Pointer) return ok } func isBytesOrRunes(typ Type) bool { - if s, _ := under(typ).(*Slice); s != nil { - t, _ := under(s.elem).(*Basic) + if s, _ := typ.Underlying().(*Slice); s != nil { + t, _ := s.elem.Underlying().(*Basic) return t != nil && (t.kind == Byte || t.kind == Rune) } return false diff --git a/src/cmd/compile/internal/types2/cycles.go b/src/cmd/compile/internal/types2/cycles.go new file mode 100644 index 00000000000..fa739a2b847 --- /dev/null +++ b/src/cmd/compile/internal/types2/cycles.go @@ -0,0 +1,105 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +import "cmd/compile/internal/syntax" + +// directCycles searches for direct cycles among package level type declarations. +// See directCycle for details. +func (check *Checker) directCycles() { + pathIdx := make(map[*TypeName]int) + for _, obj := range check.objList { + if tname, ok := obj.(*TypeName); ok { + check.directCycle(tname, pathIdx) + } + } +} + +// directCycle checks if the declaration of the type given by tname contains a direct cycle. 
+// A direct cycle exists if the path from tname's declaration's RHS leads from type name to +// type name and eventually ends up on that path again, via regular or alias declarations; +// in other words if there are no type literals (or basic types) on the path, and the path +// doesn't end in an undeclared object. +// If a cycle is detected, a cycle error is reported and the type at the start of the cycle +// is marked as invalid. +// +// The pathIdx map tracks which type names have been processed. An entry can be +// in 1 of 3 states as used in a typical 3-state (white/grey/black) graph marking +// algorithm for cycle detection: +// +// - entry not found: tname has not been seen before (white) +// - value is >= 0 : tname has been seen but is not done (grey); the value is the path index +// - value is < 0 : tname has been seen and is done (black) +// +// When directCycle returns, the pathIdx entries for all type names on the path +// that starts at tname are marked black, regardless of whether there was a cycle. +// This ensures that a type name is traversed only once. +func (check *Checker) directCycle(tname *TypeName, pathIdx map[*TypeName]int) { + if debug && check.conf.Trace { + check.trace(tname.Pos(), "-- check direct cycle for %s", tname) + } + + var path []*TypeName + for { + start, found := pathIdx[tname] + if start < 0 { + // tname is marked black - do not traverse it again. + // (start can only be < 0 if it was found in the first place) + break + } + + if found { + // tname is marked grey - we have a cycle on the path beginning at start. + // Mark tname as invalid. + tname.setType(Typ[Invalid]) + tname.setColor(black) + + // collect type names on cycle + var cycle []Object + for _, tname := range path[start:] { + cycle = append(cycle, tname) + } + + check.cycleError(cycle, firstInSrc(cycle)) + break + } + + // tname is marked white - mark it grey and add it to the path. + pathIdx[tname] = len(path) + path = append(path, tname) + + // For direct cycle detection, we don't care about whether we have an alias or not. + // If the associated type is not a name, we're at the end of the path and we're done. + rhs, ok := check.objMap[tname].tdecl.Type.(*syntax.Name) + if !ok { + break + } + + // Determine the RHS type. If it is not found in the package scope, we either + // have an error (which will be reported later), or the type exists elsewhere + // (universe scope, file scope via dot-import) and a cycle is not possible in + // the first place. If it is not a type name, we cannot have a direct cycle + // either. In all these cases we can stop. + tname1, ok := check.pkg.scope.Lookup(rhs.Value).(*TypeName) + if !ok { + break + } + + // Otherwise, continue with the RHS. + tname = tname1 + } + + // Mark all traversed type names black. 
+ // (ensure that pathIdx doesn't contain any grey entries upon returning) + for _, tname := range path { + pathIdx[tname] = -1 + } + + if debug { + for _, i := range pathIdx { + assert(i < 0) + } + } +} diff --git a/src/cmd/compile/internal/types2/decl.go b/src/cmd/compile/internal/types2/decl.go index 34105816a65..91d2492a532 100644 --- a/src/cmd/compile/internal/types2/decl.go +++ b/src/cmd/compile/internal/types2/decl.go @@ -225,8 +225,8 @@ func (check *Checker) validCycle(obj Object) (valid bool) { start := obj.color() - grey // index of obj in objPath cycle := check.objPath[start:] tparCycle := false // if set, the cycle is through a type parameter list - nval := 0 // number of (constant or variable) values in the cycle; valid if !generic - ndef := 0 // number of type definitions in the cycle; valid if !generic + nval := 0 // number of (constant or variable) values in the cycle + ndef := 0 // number of type definitions in the cycle loop: for _, obj := range cycle { switch obj := obj.(type) { @@ -235,7 +235,7 @@ loop: case *TypeName: // If we reach a generic type that is part of a cycle // and we are in a type parameter list, we have a cycle - // through a type parameter list, which is invalid. + // through a type parameter list. if check.inTParamList && isGeneric(obj.typ) { tparCycle = true break loop @@ -286,20 +286,23 @@ loop: }() } - if !tparCycle { - // A cycle involving only constants and variables is invalid but we - // ignore them here because they are reported via the initialization - // cycle check. - if nval == len(cycle) { - return true - } + // Cycles through type parameter lists are ok (go.dev/issue/68162). + if tparCycle { + return true + } - // A cycle involving only types (and possibly functions) must have at least - // one type definition to be permitted: If there is no type definition, we - // have a sequence of alias type names which will expand ad infinitum. - if nval == 0 && ndef > 0 { - return true - } + // A cycle involving only constants and variables is invalid but we + // ignore them here because they are reported via the initialization + // cycle check. + if nval == len(cycle) { + return true + } + + // A cycle involving only types (and possibly functions) must have at least + // one type definition to be permitted: If there is no type definition, we + // have a sequence of alias type names which will expand ad infinitum. 
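The directCycles pass introduced above can be pictured independently of the checker. Below is a standalone sketch of the same white/grey/black marking over a toy declaration graph; the names and the decls map are invented for illustration:

	package main

	import "fmt"

	// decls maps a type name to the type name on its declaration's RHS;
	// a missing key stands for a type literal or predeclared type.
	var decls = map[string]string{
		"A": "B",
		"B": "C",
		"C": "A", // direct cycle: A -> B -> C -> A
		"D": "B", // stops at B, which is already black by then
	}

	// pathIdx: absent = white, >= 0 = grey (index on the current path), < 0 = black.
	func findCycle(start string, pathIdx map[string]int) []string {
		var path, cycle []string
		for name := start; ; {
			if i, seen := pathIdx[name]; seen {
				if i >= 0 { // grey: the path loops back to index i
					cycle = append(cycle, path[i:]...)
				}
				break // black: already fully processed
			}
			pathIdx[name] = len(path) // mark grey
			path = append(path, name)
			rhs, ok := decls[name]
			if !ok {
				break // reached a non-name; no cycle along this path
			}
			name = rhs
		}
		for _, n := range path {
			pathIdx[n] = -1 // mark everything traversed black
		}
		return cycle
	}

	func main() {
		pathIdx := make(map[string]int)
		for _, name := range []string{"A", "D"} {
			if c := findCycle(name, pathIdx); c != nil {
				fmt.Println("cycle:", c)
			}
		}
	}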
+ if nval == 0 && ndef > 0 { + return true } check.cycleError(cycle, firstInSrc(cycle)) @@ -388,7 +391,7 @@ func (check *Checker) constDecl(obj *Const, typ, init syntax.Expr, inherited boo if !isConstType(t) { // don't report an error if the type is an invalid C (defined) type // (go.dev/issue/22090) - if isValid(under(t)) { + if isValid(t.Underlying()) { check.errorf(typ, InvalidConstType, "invalid constant type %s", t) } obj.typ = Typ[Invalid] @@ -473,7 +476,7 @@ func (check *Checker) isImportedConstraint(typ Type) bool { if named == nil || named.obj.pkg == check.pkg || named.obj.pkg == nil { return false } - u, _ := named.under().(*Interface) + u, _ := named.Underlying().(*Interface) return u != nil && !u.IsMethodSet() } @@ -533,7 +536,7 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *TypeN assert(rhs != nil) alias.fromRHS = rhs - unalias(alias) // resolve alias.actual + unalias(alias) // populate alias.actual } else { if !versionErr && tparam0 != nil { check.error(tdecl, UnsupportedFeature, "generic type alias requires GODEBUG=gotypesalias=1 or unset") @@ -555,31 +558,33 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *TypeN named := check.newNamed(obj, nil, nil) setDefType(def, named) + // The RHS of a named N can be nil if, for example, N is defined as a cycle of aliases with + // gotypesalias=0. Consider: + // + // type D N // N.unpack() will panic + // type N A + // type A = N // N.fromRHS is not set before N.unpack(), since A does not call setDefType + // + // There is likely a better way to detect such cases, but it may not be worth the effort. + // Instead, we briefly permit a nil N.fromRHS while type-checking D. + named.allowNilRHS = true + defer (func() { named.allowNilRHS = false })() + if tdecl.TParamList != nil { check.openScope(tdecl, "type parameters") defer check.closeScope() check.collectTypeParams(&named.tparams, tdecl.TParamList) } - // determine underlying type of named rhs = check.definedType(tdecl.Type, obj) assert(rhs != nil) named.fromRHS = rhs - // If the underlying type was not set while type-checking the right-hand - // side, it is invalid and an error should have been reported elsewhere. - if named.underlying == nil { - named.underlying = Typ[Invalid] - } - - // Disallow a lone type parameter as the RHS of a type declaration (go.dev/issue/45639). - // We don't need this restriction anymore if we make the underlying type of a type - // parameter its constraint interface: if the RHS is a lone type parameter, we will - // use its underlying type (like we do for any RHS in a type declaration), and its - // underlying type is an interface and the type declaration is well defined. + // spec: "In a type definition the given type cannot be a type parameter." + // (See also go.dev/issue/45639.) 
if isTypeParam(rhs) { check.error(tdecl.Type, MisplacedTypeParam, "cannot use a type parameter as RHS in type declaration") - named.underlying = Typ[Invalid] + named.fromRHS = Typ[Invalid] } } @@ -721,7 +726,7 @@ func (check *Checker) collectMethods(obj *TypeName) { } func (check *Checker) checkFieldUniqueness(base *Named) { - if t, _ := base.under().(*Struct); t != nil { + if t, _ := base.Underlying().(*Struct); t != nil { var mset objset for i := 0; i < base.NumMethods(); i++ { m := base.Method(i) diff --git a/src/cmd/compile/internal/types2/errors.go b/src/cmd/compile/internal/types2/errors.go index 44f2adc7b79..e2f5508a1a4 100644 --- a/src/cmd/compile/internal/types2/errors.go +++ b/src/cmd/compile/internal/types2/errors.go @@ -56,7 +56,7 @@ func (check *Checker) newError(code Code) *error_ { // Subsequent calls to addf provide additional information in the form of additional lines // in the error message (types2) or continuation errors identified by a tab-indented error // message (go/types). -func (err *error_) addf(at poser, format string, args ...interface{}) { +func (err *error_) addf(at poser, format string, args ...any) { err.desc = append(err.desc, errorDesc{atPos(at), err.check.sprintf(format, args...)}) } diff --git a/src/cmd/compile/internal/types2/expr.go b/src/cmd/compile/internal/types2/expr.go index e5f9a1c6f7c..39bf4055a37 100644 --- a/src/cmd/compile/internal/types2/expr.go +++ b/src/cmd/compile/internal/types2/expr.go @@ -361,7 +361,7 @@ func (check *Checker) updateExprType(x syntax.Expr, typ Type, final bool) { // If the new type is not final and still untyped, just // update the recorded type. if !final && isUntyped(typ) { - old.typ = under(typ).(*Basic) + old.typ = typ.Underlying().(*Basic) check.untyped[x] = old return } @@ -431,7 +431,7 @@ func (check *Checker) implicitTypeAndValue(x *operand, target Type) (Type, const return nil, nil, InvalidUntypedConversion } - switch u := under(target).(type) { + switch u := target.Underlying().(type) { case *Basic: if x.mode == constant_ { v, code := check.representation(x, u) @@ -616,7 +616,7 @@ Error: // incomparableCause returns a more specific cause why typ is not comparable. // If there is no more specific cause, the result is "". func (check *Checker) incomparableCause(typ Type) string { - switch under(typ).(type) { + switch typ.Underlying().(type) { case *Slice, *Signature, *Map: return compositeKind(typ) + " can only be compared to nil" } @@ -963,7 +963,7 @@ type target struct { // The result is nil if typ is not a signature. func newTarget(typ Type, desc string) *target { if typ != nil { - if sig, _ := under(typ).(*Signature); sig != nil { + if sig, _ := typ.Underlying().(*Signature); sig != nil { return &target{sig, desc} } } @@ -1112,7 +1112,7 @@ func (check *Checker) exprInternal(T *target, x *operand, e syntax.Expr, hint Ty check.errorf(x, InvalidAssert, invalidOp+"cannot use type assertion on type parameter value %s", x) goto Error } - if _, ok := under(x.typ).(*Interface); !ok { + if _, ok := x.typ.Underlying().(*Interface); !ok { check.errorf(x, InvalidAssert, invalidOp+"%s is not an interface", x) goto Error } @@ -1247,7 +1247,7 @@ Error: // represented as an integer (such as 1.0) it is returned as an integer value. // This ensures that constants of different kind but equal value (such as // 1.0 + 0i, 1.0, 1) result in the same value. 
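The keyVal doc comment shown above can be checked against the go/constant API: constants of different kinds but equal value compare equal once converted to a common kind. A small demonstration, under the assumption that the usual go/constant conversions apply:

	package main

	import (
		"fmt"
		"go/constant"
		"go/token"
	)

	func main() {
		one := constant.MakeInt64(1)      // 1
		oneF := constant.MakeFloat64(1.0) // 1.0
		oneC := constant.BinaryOp(oneF, token.ADD,
			constant.MakeImag(constant.MakeInt64(0))) // 1.0 + 0i

		fmt.Println(constant.Compare(constant.ToInt(oneF), token.EQL, one))    // true
		fmt.Println(constant.Compare(constant.ToFloat(oneC), token.EQL, oneF)) // true
	}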
-func keyVal(x constant.Value) interface{} { +func keyVal(x constant.Value) any { switch x.Kind() { case constant.Complex: f := constant.ToFloat(x) diff --git a/src/cmd/compile/internal/types2/gcsizes.go b/src/cmd/compile/internal/types2/gcsizes.go index 15f3e006425..54e8ea23c17 100644 --- a/src/cmd/compile/internal/types2/gcsizes.go +++ b/src/cmd/compile/internal/types2/gcsizes.go @@ -16,7 +16,7 @@ func (s *gcSizes) Alignof(T Type) (result int64) { // For arrays and structs, alignment is defined in terms // of alignment of the elements and fields, respectively. - switch t := under(T).(type) { + switch t := T.Underlying().(type) { case *Array: // spec: "For a variable x of array type: unsafe.Alignof(x) // is the same as unsafe.Alignof(x[0]), but at least 1." @@ -96,7 +96,7 @@ func (s *gcSizes) Offsetsof(fields []*Var) []int64 { } func (s *gcSizes) Sizeof(T Type) int64 { - switch t := under(T).(type) { + switch t := T.Underlying().(type) { case *Basic: assert(isTyped(T)) k := t.kind diff --git a/src/cmd/compile/internal/types2/hilbert_test.go b/src/cmd/compile/internal/types2/hilbert_test.go index df8a3e7d78a..6cc0974c334 100644 --- a/src/cmd/compile/internal/types2/hilbert_test.go +++ b/src/cmd/compile/internal/types2/hilbert_test.go @@ -68,7 +68,7 @@ type gen struct { bytes.Buffer } -func (g *gen) p(format string, args ...interface{}) { +func (g *gen) p(format string, args ...any) { fmt.Fprintf(&g.Buffer, format, args...) } diff --git a/src/cmd/compile/internal/types2/index.go b/src/cmd/compile/internal/types2/index.go index 7e16a87332d..ca84184d35a 100644 --- a/src/cmd/compile/internal/types2/index.go +++ b/src/cmd/compile/internal/types2/index.go @@ -35,7 +35,7 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo return false case value: - if sig, _ := under(x.typ).(*Signature); sig != nil && sig.TypeParams().Len() > 0 { + if sig, _ := x.typ.Underlying().(*Signature); sig != nil && sig.TypeParams().Len() > 0 { // function instantiation return true } @@ -50,7 +50,7 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo // ordinary index expression valid := false length := int64(-1) // valid if >= 0 - switch typ := under(x.typ).(type) { + switch typ := x.typ.Underlying().(type) { case *Basic: if isString(typ) { valid = true @@ -73,7 +73,7 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo x.typ = typ.elem case *Pointer: - if typ, _ := under(typ.base).(*Array); typ != nil { + if typ, _ := typ.base.Underlying().(*Array); typ != nil { valid = true length = typ.len x.mode = variable @@ -124,7 +124,7 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo mode = value } case *Pointer: - if t, _ := under(t.base).(*Array); t != nil { + if t, _ := t.base.Underlying().(*Array); t != nil { l = t.len e = t.elem } @@ -247,7 +247,7 @@ func (check *Checker) sliceExpr(x *operand, e *syntax.SliceExpr) { // but don't go from untyped string to string. 
cu = Typ[String] if !isTypeParam(x.typ) { - cu = under(x.typ) // untyped string remains untyped + cu = x.typ.Underlying() // untyped string remains untyped } } @@ -292,7 +292,7 @@ func (check *Checker) sliceExpr(x *operand, e *syntax.SliceExpr) { x.typ = &Slice{elem: u.elem} case *Pointer: - if u, _ := under(u.base).(*Array); u != nil { + if u, _ := u.base.Underlying().(*Array); u != nil { valid = true length = u.len x.typ = &Slice{elem: u.elem} diff --git a/src/cmd/compile/internal/types2/infer.go b/src/cmd/compile/internal/types2/infer.go index 08d42296905..996f6a51090 100644 --- a/src/cmd/compile/internal/types2/infer.go +++ b/src/cmd/compile/internal/types2/infer.go @@ -427,7 +427,7 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type, // Note that if t0 was a signature, t1 must be a signature, and t1 // can only be a generic signature if it originated from a generic // function argument. Those signatures are never defined types and - // thus there is no need to call under below. + // thus there is no need to call Underlying below. // TODO(gri) Consider doing this in Checker.subst. // Then this would fall out automatically here and also // in instantiation (where we also explicitly nil out @@ -668,7 +668,7 @@ func coreTerm(tpar *TypeParam) (*term, bool) { if n == 1 { if debug { u, _ := commonUnder(tpar, nil) - assert(under(single.typ) == u) + assert(single.typ.Underlying() == u) } return single, true } diff --git a/src/cmd/compile/internal/types2/instantiate.go b/src/cmd/compile/internal/types2/instantiate.go index 1c8c12d07cf..3c4044ed3c9 100644 --- a/src/cmd/compile/internal/types2/instantiate.go +++ b/src/cmd/compile/internal/types2/instantiate.go @@ -83,7 +83,7 @@ func Instantiate(ctxt *Context, orig Type, targs []Type, validate bool) (Type, e // // For Named types the resulting instance may be unexpanded. // -// check may be nil (when not type-checking syntax); pos is used only only if check is non-nil. +// check may be nil (when not type-checking syntax); pos is used only if check is non-nil. func (check *Checker) instance(pos syntax.Pos, orig genericType, targs []Type, expanding *Named, ctxt *Context) (res Type) { // The order of the contexts below matters: we always prefer instances in the // expanding instance context in order to preserve reference cycles. @@ -226,12 +226,12 @@ func (check *Checker) verify(pos syntax.Pos, tparams []*TypeParam, targs []Type, // If the provided cause is non-nil, it may be set to an error string // explaining why V does not implement (or satisfy, for constraints) T. func (check *Checker) implements(V, T Type, constraint bool, cause *string) bool { - Vu := under(V) - Tu := under(T) + Vu := V.Underlying() + Tu := T.Underlying() if !isValid(Vu) || !isValid(Tu) { return true // avoid follow-on errors } - if p, _ := Vu.(*Pointer); p != nil && !isValid(under(p.base)) { + if p, _ := Vu.(*Pointer); p != nil && !isValid(p.base.Underlying()) { return true // avoid follow-on errors (see go.dev/issue/49541 for an example) } @@ -339,7 +339,7 @@ func (check *Checker) implements(V, T Type, constraint bool, cause *string) bool // If V ∉ t.typ but V ∈ ~t.typ then remember this type // so we can suggest it as an alternative in the error // message. 
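The implements logic above remembers a failed non-tilde term whose tilde variant would have matched, so the resulting error can suggest it. From the user's side the difference looks like this (toy example, not from the CL):

	package main

	type MyInt int

	func exact[T int](x T)   {} // type set {int}: excludes MyInt
	func approx[T ~int](x T) {} // type set {~int}: any type whose underlying type is int

	func main() {
		approx(MyInt(3)) // ok
		exact(3)         // ok
		// exact(MyInt(3)) fails; since MyInt is in ~int, the checker can
		// suggest ~int as the alternative mentioned in the comment above.
	}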
- if alt == nil && !t.tilde && Identical(t.typ, under(t.typ)) { + if alt == nil && !t.tilde && Identical(t.typ, t.typ.Underlying()) { tt := *t tt.tilde = true if tt.includes(V) { diff --git a/src/cmd/compile/internal/types2/lookup.go b/src/cmd/compile/internal/types2/lookup.go index 624b510dc83..3e18db09f5c 100644 --- a/src/cmd/compile/internal/types2/lookup.go +++ b/src/cmd/compile/internal/types2/lookup.go @@ -145,14 +145,14 @@ func lookupFieldOrMethodImpl(T Type, addressable bool, pkg *Package, name string return // blank fields/methods are never found } - // Importantly, we must not call under before the call to deref below (nor - // does deref call under), as doing so could incorrectly result in finding + // Importantly, we must not call Underlying before the call to deref below (nor + // does deref call Underlying), as doing so could incorrectly result in finding // methods of the pointer base type when T is a (*Named) pointer type. typ, isPtr := deref(T) // *typ where typ is an interface (incl. a type parameter) has no methods. if isPtr { - if _, ok := under(typ).(*Interface); ok { + if _, ok := typ.Underlying().(*Interface); ok { return } } @@ -202,7 +202,7 @@ func lookupFieldOrMethodImpl(T Type, addressable bool, pkg *Package, name string } } - switch t := under(typ).(type) { + switch t := typ.Underlying().(type) { case *Struct: // look for a matching field and collect embedded types for i, f := range t.fields { @@ -373,7 +373,7 @@ func MissingMethod(V Type, T *Interface, static bool) (method *Func, wrongType b // The comparator is used to compare signatures. // If a method is missing and cause is not nil, *cause describes the error. func (check *Checker) missingMethod(V, T Type, static bool, equivalent func(x, y Type) bool, cause *string) (method *Func, wrongType bool) { - methods := under(T).(*Interface).typeSet().methods // T must be an interface + methods := T.Underlying().(*Interface).typeSet().methods // T must be an interface if len(methods) == 0 { return nil, false } @@ -393,7 +393,7 @@ func (check *Checker) missingMethod(V, T Type, static bool, equivalent func(x, y var m *Func // method on T we're trying to implement var f *Func // method on V, if found (state is one of ok, wrongName, wrongSig) - if u, _ := under(V).(*Interface); u != nil { + if u, _ := V.Underlying().(*Interface); u != nil { tset := u.typeSet() for _, m = range methods { _, f = tset.LookupMethod(m.pkg, m.name, false) @@ -534,7 +534,7 @@ func (check *Checker) hasAllMethods(V, T Type, static bool, equivalent func(x, y // hasInvalidEmbeddedFields reports whether T is a struct (or a pointer to a struct) that contains // (directly or indirectly) embedded fields with invalid types. func hasInvalidEmbeddedFields(T Type, seen map[*Struct]bool) bool { - if S, _ := under(derefStructPtr(T)).(*Struct); S != nil && !seen[S] { + if S, _ := derefStructPtr(T).Underlying().(*Struct); S != nil && !seen[S] { if seen == nil { seen = make(map[*Struct]bool) } @@ -549,14 +549,14 @@ func hasInvalidEmbeddedFields(T Type, seen map[*Struct]bool) bool { } func isInterfacePtr(T Type) bool { - p, _ := under(T).(*Pointer) + p, _ := T.Underlying().(*Pointer) return p != nil && IsInterface(p.base) } // check may be nil. 
func (check *Checker) interfacePtrError(T Type) string { assert(isInterfacePtr(T)) - if p, _ := under(T).(*Pointer); isTypeParam(p.base) { + if p, _ := T.Underlying().(*Pointer); isTypeParam(p.base) { return check.sprintf("type %s is pointer to type parameter, not type parameter", T) } return check.sprintf("type %s is pointer to interface, not interface", T) @@ -629,8 +629,8 @@ func deref(typ Type) (Type, bool) { // derefStructPtr dereferences typ if it is a (named or unnamed) pointer to a // (named or unnamed) struct and returns its base. Otherwise it returns typ. func derefStructPtr(typ Type) Type { - if p, _ := under(typ).(*Pointer); p != nil { - if _, ok := under(p.base).(*Struct); ok { + if p, _ := typ.Underlying().(*Pointer); p != nil { + if _, ok := p.base.Underlying().(*Struct); ok { return p.base } } diff --git a/src/cmd/compile/internal/types2/named.go b/src/cmd/compile/internal/types2/named.go index d02b95e874a..b5c8ed142aa 100644 --- a/src/cmd/compile/internal/types2/named.go +++ b/src/cmd/compile/internal/types2/named.go @@ -33,7 +33,7 @@ import ( // In cases 1, 3, and 4, it is possible that the underlying type or methods of // N may not be immediately available. // - During type-checking, we allocate N before type-checking its underlying -// type or methods, so that we may resolve recursive references. +// type or methods, so that we can create recursive references. // - When loading from export data, we may load its methods and underlying // type lazily using a provided load function. // - After instantiating, we lazily expand the underlying type and methods @@ -47,10 +47,8 @@ import ( // soon. // // We achieve this by tracking state with an atomic state variable, and -// guarding potentially concurrent calculations with a mutex. At any point in -// time this state variable determines which data on N may be accessed. As -// state monotonically progresses, any data available at state M may be -// accessed without acquiring the mutex at state N, provided N >= M. +// guarding potentially concurrent calculations with a mutex. See [stateMask] +// for details. // // GLOSSARY: Here are a few terms used in this file to describe Named types: // - We say that a Named type is "instantiated" if it has been constructed by @@ -59,18 +57,19 @@ import ( // declaration in the source. Instantiated named types correspond to a type // instantiation in the source, not a declaration. But their Origin type is // a declared type. -// - We say that a Named type is "resolved" if its RHS information has been -// loaded or fully type-checked. For Named types constructed from export -// data, this may involve invoking a loader function to extract information -// from export data. For instantiated named types this involves reading -// information from their origin. +// - We say that a Named type is "unpacked" if its RHS information has been +// populated, normalizing its representation for use in type-checking +// operations and abstracting away how it was created: +// - For a Named type constructed from unified IR, this involves invoking +// a lazy loader function to extract details from UIR as needed. +// - For an instantiated Named type, this involves extracting information +// from its origin and substituting type arguments into a "synthetic" +// RHS; this process is called "expanding" the RHS (see below). 
// - We say that a Named type is "expanded" if it is an instantiated type and -// type parameters in its underlying type and methods have been substituted -// with the type arguments from the instantiation. A type may be partially -// expanded if some but not all of these details have been substituted. -// Similarly, we refer to these individual details (underlying type or -// method) as being "expanded". -// - When all information is known for a named type, we say it is "complete". +// type parameters in its RHS and methods have been substituted with the type +// arguments from the instantiation. A type may be partially expanded if some +// but not all of these details have been substituted. Similarly, we refer to +// these individual details (RHS or method) as being "expanded". // // Some invariants to keep in mind: each declared Named type has a single // corresponding object, and that object's type is the (possibly generic) Named @@ -87,8 +86,8 @@ import ( // presence of a cycle of named types, expansion will eventually find an // existing instance in the Context and short-circuit the expansion. // -// Once an instance is complete, we can nil out this shared Context to unpin -// memory, though this Context may still be held by other incomplete instances +// Once an instance is fully expanded, we can nil out this shared Context to unpin +// memory, though the Context may still be held by other incomplete instances // in its "lineage". // A Named represents a named (defined) type. @@ -107,18 +106,17 @@ type Named struct { check *Checker // non-nil during type-checking; nil otherwise obj *TypeName // corresponding declared object for declared types; see above for instantiated types - // fromRHS holds the type (on RHS of declaration) this *Named type is derived - // from (for cycle reporting). Only used by validType, and therefore does not - // require synchronization. - fromRHS Type + // flags indicating temporary violations of the invariants for fromRHS and underlying + allowNilRHS bool // same as below, as well as briefly during checking of a type declaration + allowNilUnderlying bool // may be true from creation via [NewNamed] until [Named.SetUnderlying] - // information for instantiated types; nil otherwise - inst *instance + inst *instance // information for instantiated types; nil otherwise mu sync.Mutex // guards all fields below - state_ uint32 // the current state of this type; must only be accessed atomically - underlying Type // possibly a *Named during setup; never a *Named once set up completely + state_ uint32 // the current state of this type; must only be accessed atomically or when mu is held + fromRHS Type // the declaration RHS this type is derived from tparams *TypeParamList // type parameters, or nil + underlying Type // underlying type, or nil // methods declared for this type (not the method set of this type) // Signatures are type-checked lazily. @@ -140,15 +138,43 @@ type instance struct { ctxt *Context // local Context; set to nil after full expansion } -// namedState represents the possible states that a named type may assume. -type namedState uint32 +// stateMask represents each state in the lifecycle of a named type. +// +// Each named type begins in the initial state. 
A named type may transition to a new state
+// according to the below diagram:
+//
+//	initial
+//	  lazyLoaded
+//	    unpacked
+//	      └── hasMethods
+//	      └── hasUnder
+//
+// That is, descent down the tree is mostly linear (initial through unpacked), except upon
+// reaching the leaves (hasMethods and hasUnder). A type may occupy any combination of the
+// leaf states at once (they are independent states).
+//
+// To represent this independence, the set of active states is represented with a bit set. State
+// transitions are monotonic. Once a state bit is set, it remains set.
+//
+// The above constraints significantly narrow the possible bit sets for a named type. With bits
+// set left-to-right, they are:
+//
+//	0000 | initial
+//	1000 | lazyLoaded
+//	1100 | unpacked, which implies lazyLoaded
+//	1110 | hasMethods, which implies unpacked (which in turn implies lazyLoaded)
+//	1101 | hasUnder, which implies unpacked ...
+//	1111 | both hasMethods and hasUnder which implies unpacked ...
+//
+// To read the state of a named type, use [Named.stateHas]; to write, use [Named.setState].
+type stateMask uint32

-// Note: the order of states is relevant
 const (
-	unresolved namedState = iota // tparams, underlying type and methods might be unavailable
-	resolved                     // resolve has run; methods might be unexpanded (for instances)
-	loaded                       // loader has run; constraints might be unexpanded (for generic types)
-	complete                     // all data is known
+	// initially, type parameters, RHS, underlying, and methods might be unavailable
+	lazyLoaded stateMask = 1 << iota // methods are available, but constraints might be unexpanded (for generic types)
+	unpacked                         // methods might be unexpanded (for instances)
+	hasMethods                       // methods are all expanded (for instances)
+	hasUnder                         // underlying type is available
 )

 // NewNamed returns a new named type for the given type name, underlying type, and associated methods.
@@ -158,31 +184,38 @@ func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
 	if asNamed(underlying) != nil {
 		panic("underlying type must not be *Named")
 	}
-	return (*Checker)(nil).newNamed(obj, underlying, methods)
+	n := (*Checker)(nil).newNamed(obj, underlying, methods)
+	if underlying == nil {
+		n.allowNilRHS = true
+		n.allowNilUnderlying = true
+	} else {
+		n.SetUnderlying(underlying)
+	}
+	return n
+}

-// resolve resolves the type parameters, methods, and underlying type of n.
+// unpack populates the type parameters, methods, and RHS of n.
 //
-// For the purposes of resolution, there are three categories of named types:
-//  1. Instantiated Types
-//  2. Lazy Loaded Types
-//  3. All Others
+// For the purposes of unpacking, there are three categories of named types:
+//  1. Lazy loaded types
+//  2. Instantiated types
+//  3. All others
 //
 // Note that the above form a partition.
 //
-// Instantiated types:
-//	Type parameters, methods, and underlying type of n become accessible,
-//	though methods are lazily populated as needed.
-//
 // Lazy loaded types:
-//	Type parameters, methods, and underlying type of n become accessible
-//	and are fully expanded.
+//	Type parameters, methods, and RHS of n become accessible and are fully
+//	expanded.
+//
+// Instantiated types:
+//	Type parameters, methods, and RHS of n become accessible, though methods
+//	are lazily populated as needed.
 //
 // All others:
-//	Effectively, nothing happens. The underlying type of n may still be
-//	a named type.
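The new stateMask plumbing reduces to a monotonic atomic bit set. A self-contained sketch of the same pattern follows; node stands in for Named here, and atomic.OrUint32 requires Go 1.23 or newer:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	type stateMask uint32

	const (
		lazyLoaded stateMask = 1 << iota
		unpacked
		hasMethods
		hasUnder
	)

	type node struct{ state uint32 }

	// stateHas reports whether any bit of m is set, like Named.stateHas above.
	func (n *node) stateHas(m stateMask) bool {
		return stateMask(atomic.LoadUint32(&n.state))&m != 0
	}

	// setState ORs in the bits of m; a bit, once set, stays set.
	func (n *node) setState(m stateMask) {
		atomic.OrUint32(&n.state, uint32(m))
	}

	func main() {
		var n node
		n.setState(lazyLoaded | unpacked)
		fmt.Println(n.stateHas(unpacked))              // true
		fmt.Println(n.stateHas(hasMethods | hasUnder)) // false
		n.setState(hasUnder)
		fmt.Println(n.stateHas(hasUnder)) // true
	}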
-func (n *Named) resolve() *Named { - if n.state() > unresolved { // avoid locking below +// Effectively, nothing happens. +func (n *Named) unpack() *Named { + if n.stateHas(lazyLoaded | unpacked) { // avoid locking below return n } @@ -191,27 +224,29 @@ func (n *Named) resolve() *Named { n.mu.Lock() defer n.mu.Unlock() - if n.state() > unresolved { + // only atomic for consistency; we are holding the mutex + if n.stateHas(lazyLoaded | unpacked) { return n } + // underlying comes after unpacking, do not set it + defer (func() { assert(!n.stateHas(hasUnder)) })() + if n.inst != nil { - assert(n.underlying == nil) // n is an unresolved instance - assert(n.loader == nil) // instances are created by instantiation, in which case n.loader is nil + assert(n.fromRHS == nil) // instantiated types are not declared types + assert(n.loader == nil) // cannot import an instantiation orig := n.inst.orig - orig.resolve() - underlying := n.expandUnderlying() + orig.unpack() + n.fromRHS = n.expandRHS() n.tparams = orig.tparams - n.underlying = underlying - n.fromRHS = orig.fromRHS // for cycle detection if len(orig.methods) == 0 { - n.setState(complete) // nothing further to do + n.setState(lazyLoaded | unpacked | hasMethods) // nothing further to do n.inst.ctxt = nil } else { - n.setState(resolved) + n.setState(lazyLoaded | unpacked) } return n } @@ -224,43 +259,57 @@ func (n *Named) resolve() *Named { // methods would need to support reentrant calls though. It would // also make the API more future-proof towards further extensions. if n.loader != nil { - assert(n.underlying == nil) - assert(n.TypeArgs().Len() == 0) // instances are created by instantiation, in which case n.loader is nil + assert(n.fromRHS == nil) // not loaded yet + assert(n.inst == nil) // cannot import an instantiation tparams, underlying, methods, delayed := n.loader(n) n.loader = nil n.tparams = bindTParams(tparams) - n.underlying = underlying n.fromRHS = underlying // for cycle detection n.methods = methods - // advance state to avoid deadlock calling delayed functions - n.setState(loaded) - + n.setState(lazyLoaded) // avoid deadlock calling delayed functions for _, f := range delayed { f() } } - n.setState(complete) + n.setState(lazyLoaded | unpacked | hasMethods) return n } -// state atomically accesses the current state of the receiver. -func (n *Named) state() namedState { - return namedState(atomic.LoadUint32(&n.state_)) +// stateHas atomically determines whether the current state includes any active bit in sm. +func (n *Named) stateHas(m stateMask) bool { + return stateMask(atomic.LoadUint32(&n.state_))&m != 0 } -// setState atomically stores the given state for n. +// setState atomically sets the current state to include each active bit in sm. // Must only be called while holding n.mu. -func (n *Named) setState(state namedState) { - atomic.StoreUint32(&n.state_, uint32(state)) +func (n *Named) setState(m stateMask) { + atomic.OrUint32(&n.state_, uint32(m)) + // verify state transitions + if debug { + m := stateMask(atomic.LoadUint32(&n.state_)) + u := m&unpacked != 0 + // unpacked => lazyLoaded + if u { + assert(m&lazyLoaded != 0) + } + // hasMethods => unpacked + if m&hasMethods != 0 { + assert(u) + } + // hasUnder => unpacked + if m&hasUnder != 0 { + assert(u) + } + } } // newNamed is like NewNamed but with a *Checker receiver. 
-func (check *Checker) newNamed(obj *TypeName, underlying Type, methods []*Func) *Named { - typ := &Named{check: check, obj: obj, fromRHS: underlying, underlying: underlying, methods: methods} +func (check *Checker) newNamed(obj *TypeName, fromRHS Type, methods []*Func) *Named { + typ := &Named{check: check, obj: obj, fromRHS: fromRHS, methods: methods} if obj.typ == nil { obj.typ = typ } @@ -300,25 +349,13 @@ func (check *Checker) newNamedInstance(pos syntax.Pos, orig *Named, targs []Type return typ } -func (t *Named) cleanup() { - assert(t.inst == nil || t.inst.orig.inst == nil) - // Ensure that every defined type created in the course of type-checking has - // either non-*Named underlying type, or is unexpanded. - // - // This guarantees that we don't leak any types whose underlying type is - // *Named, because any unexpanded instances will lazily compute their - // underlying type by substituting in the underlying type of their origin. - // The origin must have either been imported or type-checked and expanded - // here, and in either case its underlying type will be fully expanded. - switch t.underlying.(type) { - case nil: - if t.TypeArgs().Len() == 0 { - panic("nil underlying") - } - case *Named, *Alias: - t.under() // t.under may add entries to check.cleaners +func (n *Named) cleanup() { + // Instances can have a nil underlying at the end of type checking — they + // will lazily expand it as needed. All other types must have one. + if n.inst == nil { + n.Underlying() } - t.check = nil + n.check = nil } // Obj returns the type name for the declaration defining the named type t. For @@ -341,13 +378,13 @@ func (t *Named) Origin() *Named { // TypeParams returns the type parameters of the named type t, or nil. // The result is non-nil for an (originally) generic type even if it is instantiated. -func (t *Named) TypeParams() *TypeParamList { return t.resolve().tparams } +func (t *Named) TypeParams() *TypeParamList { return t.unpack().tparams } // SetTypeParams sets the type parameters of the named type t. // t must not have type arguments. func (t *Named) SetTypeParams(tparams []*TypeParam) { assert(t.inst == nil) - t.resolve().tparams = bindTParams(tparams) + t.unpack().tparams = bindTParams(tparams) } // TypeArgs returns the type arguments used to instantiate the named type t. @@ -360,7 +397,7 @@ func (t *Named) TypeArgs() *TypeList { // NumMethods returns the number of explicit methods defined for t. func (t *Named) NumMethods() int { - return len(t.Origin().resolve().methods) + return len(t.Origin().unpack().methods) } // Method returns the i'th method of named type t for 0 <= i < t.NumMethods(). @@ -375,13 +412,13 @@ func (t *Named) NumMethods() int { // But the specific ordering is not specified and must not be relied on as it may // change in the future. func (t *Named) Method(i int) *Func { - t.resolve() + t.unpack() - if t.state() >= complete { + if t.stateHas(hasMethods) { return t.methods[i] } - assert(t.inst != nil) // only instances should have incomplete methods + assert(t.inst != nil) // only instances should have unexpanded methods orig := t.inst.orig t.mu.Lock() @@ -398,9 +435,9 @@ func (t *Named) Method(i int) *Func { t.inst.expandedMethods++ // Check if we've created all methods at this point. If we have, mark the - // type as fully expanded. + // type as having all of its methods. 
if t.inst.expandedMethods == len(orig.methods) { - t.setState(complete) + t.setState(hasMethods) t.inst.ctxt = nil // no need for a context anymore } } @@ -469,18 +506,25 @@ func (t *Named) expandMethod(i int) *Func { // SetUnderlying sets the underlying type and marks t as complete. // t must not have type arguments. -func (t *Named) SetUnderlying(underlying Type) { +func (t *Named) SetUnderlying(u Type) { assert(t.inst == nil) - if underlying == nil { + if u == nil { panic("underlying type must not be nil") } - if asNamed(underlying) != nil { + if asNamed(u) != nil { panic("underlying type must not be *Named") } - t.resolve().underlying = underlying - if t.fromRHS == nil { - t.fromRHS = underlying // for cycle detection - } + // be careful to uphold the state invariants + t.mu.Lock() + defer t.mu.Unlock() + + t.fromRHS = u + t.allowNilRHS = false + t.setState(lazyLoaded | unpacked | hasMethods) // TODO(markfreeman): Why hasMethods? + + t.underlying = u + t.allowNilUnderlying = false + t.setState(hasUnder) } // AddMethod adds method m unless it is already in the method list. @@ -489,7 +533,7 @@ func (t *Named) SetUnderlying(underlying Type) { func (t *Named) AddMethod(m *Func) { assert(samePkg(t.obj.pkg, m.pkg)) assert(t.inst == nil) - t.resolve() + t.unpack() if t.methodIndex(m.name, false) < 0 { t.methods = append(t.methods, m) } @@ -518,14 +562,39 @@ func (t *Named) methodIndex(name string, foldCase bool) int { return -1 } +// rhs returns [Named.fromRHS]. +// +// In debug mode, it also asserts that n is in an appropriate state. +func (n *Named) rhs() Type { + if debug { + assert(n.stateHas(lazyLoaded | unpacked)) + } + return n.fromRHS +} + // Underlying returns the [underlying type] of the named type t, resolving all // forwarding declarations. Underlying types are never Named, TypeParam, or // Alias types. // // [underlying type]: https://go.dev/ref/spec#Underlying_types. -func (t *Named) Underlying() Type { - // TODO(gri) Investigate if Unalias can be moved to where underlying is set. - return Unalias(t.resolve().underlying) +func (n *Named) Underlying() Type { + n.unpack() + + // The gccimporter depends on writing a nil underlying via NewNamed and + // immediately reading it back. Rather than putting that in Named.under + // and complicating things there, we just check for that special case here. + if n.rhs() == nil { + assert(n.allowNilRHS) + if n.allowNilUnderlying { + return nil + } + } + + if !n.stateHas(hasUnder) { // minor performance optimization + n.resolveUnderlying() + } + + return n.underlying } func (t *Named) String() string { return TypeString(t, nil) } @@ -536,96 +605,78 @@ func (t *Named) String() string { return TypeString(t, nil) } // TODO(rfindley): reorganize the loading and expansion methods under this // heading. -// under returns the expanded underlying type of n0; possibly by following -// forward chains of named types. If an underlying type is found, resolve -// the chain by setting the underlying type for each defined type in the -// chain before returning it. If no underlying type is found or a cycle -// is detected, the result is Typ[Invalid]. If a cycle is detected and -// n0.check != nil, the cycle is reported. +// resolveUnderlying computes the underlying type of n. If n already has an +// underlying type, nothing happens. // -// This is necessary because the underlying type of named may be itself a -// named type that is incomplete: +// It does so by following RHS type chains for alias and named types. 
If any +// other type T is found, each named type in the chain has its underlying +// type set to T. Aliases are skipped because their underlying type is +// not memoized. // -// type ( -// A B -// B *C -// C A -// ) -// -// The type of C is the (named) type of A which is incomplete, -// and which has as its underlying type the named type B. -func (n0 *Named) under() Type { - u := n0.Underlying() +// resolveUnderlying assumes that there are no direct cycles; if there were +// any, they were broken (by setting the respective types to invalid) during +// the directCycles check phase. +func (n *Named) resolveUnderlying() { + assert(n.stateHas(unpacked)) - // If the underlying type of a defined type is not a defined - // (incl. instance) type, then that is the desired underlying - // type. - var n1 *Named - switch u1 := u.(type) { - case nil: - // After expansion via Underlying(), we should never encounter a nil - // underlying. - panic("nil underlying") - default: - // common case - return u - case *Named: - // handled below - n1 = u1 + var seen map[*Named]bool // for debugging only + if debug { + seen = make(map[*Named]bool) } - if n0.check == nil { - panic("Named.check == nil but type is incomplete") - } - - // Invariant: after this point n0 as well as any named types in its - // underlying chain should be set up when this function exits. - check := n0.check - n := n0 - - seen := make(map[*Named]int) // types that need their underlying type resolved - var path []Object // objects encountered, for cycle reporting - -loop: - for { - seen[n] = len(seen) - path = append(path, n.obj) - n = n1 - if i, ok := seen[n]; ok { - // cycle - check.cycleError(path[i:], firstInSrc(path[i:])) - u = Typ[Invalid] - break - } - u = n.Underlying() - switch u1 := u.(type) { + var path []*Named + var u Type + for rhs := Type(n); u == nil; { + switch t := rhs.(type) { case nil: u = Typ[Invalid] - break loop - default: - break loop + + case *Alias: + rhs = unalias(t) + case *Named: - // Continue collecting *Named types in the chain. - n1 = u1 + if debug { + assert(!seen[t]) + seen[t] = true + } + + // don't recalculate the underlying + if t.stateHas(hasUnder) { + u = t.underlying + break + } + + if debug { + seen[t] = true + } + path = append(path, t) + + t.unpack() + assert(t.rhs() != nil || t.allowNilRHS) + rhs = t.rhs() + + default: + u = rhs // any type literal or predeclared type works } } - for n := range seen { - // We should never have to update the underlying type of an imported type; - // those underlying types should have been resolved during the import. - // Also, doing so would lead to a race condition (was go.dev/issue/31749). - // Do this check always, not just in debug mode (it's cheap). - if n.obj.pkg != check.pkg { - panic("imported type with unresolved underlying type") - } - n.underlying = u + for _, t := range path { + func() { + t.mu.Lock() + defer t.mu.Unlock() + // Careful, t.underlying has lock-free readers. Since we might be racing + // another call to resolveUnderlying, we have to avoid overwriting + // t.underlying. Otherwise, the race detector will be tripped. + if !t.stateHas(hasUnder) { + t.underlying = u + t.setState(hasUnder) + } + }() } - - return u } func (n *Named) lookupMethod(pkg *Package, name string, foldCase bool) (int, *Func) { - n.resolve() + n.unpack() if samePkg(n.obj.pkg, pkg) || isExported(name) || foldCase { // If n is an instance, we may not have yet instantiated all of its methods. 
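[Editorial note: to make the chain walk above concrete, here is a toy model of resolveUnderlying under its stated assumptions (no direct cycles; every RHS chain ends in a non-named type). All types here are invented for illustration:]

	package main

	import "fmt"

	type typ interface{ String() string }

	type basic string

	func (b basic) String() string { return string(b) }

	type named struct {
		name       string
		rhs        typ
		underlying typ // memoized
	}

	func (n *named) String() string { return n.name }

	// resolveUnderlying follows right-hand sides through named types until a
	// non-named type is found, then memoizes it on every named type visited
	// along the way.
	func resolveUnderlying(n *named) typ {
		var path []*named
		var u typ
		for rhs := typ(n); u == nil; {
			switch t := rhs.(type) {
			case *named:
				if t.underlying != nil {
					u = t.underlying // already computed
					break
				}
				path = append(path, t)
				rhs = t.rhs
			default:
				u = rhs
			}
		}
		for _, t := range path {
			t.underlying = u
		}
		return u
	}

	func main() {
		// Models: type B int; type A B.
		b := &named{name: "B", rhs: basic("int")}
		a := &named{name: "A", rhs: b}
		fmt.Println(resolveUnderlying(a)) // int
		fmt.Println(b.underlying)         // int (memoized along the path)
	}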
// Look up the method index in orig, and only instantiate method at the @@ -646,78 +697,106 @@ func (check *Checker) context() *Context { return check.ctxt } -// expandUnderlying substitutes type arguments in the underlying type n.orig, -// returning the result. Returns Typ[Invalid] if there was an error. -func (n *Named) expandUnderlying() Type { +// expandRHS crafts a synthetic RHS for an instantiated type using the RHS of +// its origin type (which must be a generic type). +// +// Suppose that we had: +// +// type T[P any] struct { +// f P +// } +// +// type U T[int] +// +// When we go to U, we observe T[int]. Since T[int] is an instantiation, it has no +// declaration. Here, we craft a synthetic RHS for T[int] as if it were declared, +// somewhat similar to: +// +// type T[int] struct { +// f int +// } +// +// And note that the synthetic RHS here is the same as the underlying for U. Now, +// consider: +// +// type T[_ any] U +// type U int +// type V T[U] +// +// The synthetic RHS for T[U] becomes: +// +// type T[U] U +// +// Whereas the underlying of V is int, not U. +func (n *Named) expandRHS() (rhs Type) { check := n.check if check != nil && check.conf.Trace { - check.trace(n.obj.pos, "-- Named.expandUnderlying %s", n) + check.trace(n.obj.pos, "-- Named.expandRHS %s", n) check.indent++ defer func() { check.indent-- - check.trace(n.obj.pos, "=> %s (tparams = %s, under = %s)", n, n.tparams.list(), n.underlying) + check.trace(n.obj.pos, "=> %s (rhs = %s)", n, rhs) }() } - assert(n.inst.orig.underlying != nil) + assert(!n.stateHas(unpacked)) + assert(n.inst.orig.stateHas(lazyLoaded | unpacked)) + if n.inst.ctxt == nil { n.inst.ctxt = NewContext() } + ctxt := n.inst.ctxt orig := n.inst.orig + targs := n.inst.targs + tpars := orig.tparams - if asNamed(orig.underlying) != nil { - // We should only get a Named underlying type here during type checking - // (for example, in recursive type declarations). - assert(check != nil) - } - - if orig.tparams.Len() != targs.Len() { - // Mismatching arg and tparam length may be checked elsewhere. + if targs.Len() != tpars.Len() { return Typ[Invalid] } - // Ensure that an instance is recorded before substituting, so that we - // resolve n for any recursive references. - h := n.inst.ctxt.instanceHash(orig, targs.list()) - n2 := n.inst.ctxt.update(h, orig, n.TypeArgs().list(), n) - assert(n == n2) + h := ctxt.instanceHash(orig, targs.list()) + u := ctxt.update(h, orig, targs.list(), n) // block fixed point infinite instantiation + assert(n == u) - smap := makeSubstMap(orig.tparams.list(), targs.list()) - var ctxt *Context + m := makeSubstMap(tpars.list(), targs.list()) if check != nil { ctxt = check.context() } - underlying := n.check.subst(n.obj.pos, orig.underlying, smap, n, ctxt) - // If the underlying type of n is an interface, we need to set the receiver of - // its methods accurately -- we set the receiver of interface methods on - // the RHS of a type declaration to the defined type. - if iface, _ := underlying.(*Interface); iface != nil { + + rhs = check.subst(n.obj.pos, orig.rhs(), m, n, ctxt) + + // TODO(markfreeman): Can we handle this in substitution? + // If the RHS is an interface, we must set the receiver of interface methods + // to the named type. + if iface, _ := rhs.(*Interface); iface != nil { if methods, copied := replaceRecvType(iface.methods, orig, n); copied { - // If the underlying type doesn't actually use type parameters, it's - // possible that it wasn't substituted. 
In this case we need to create - // a new *Interface before modifying receivers. - if iface == orig.underlying { - old := iface - iface = check.newInterface() - iface.embeddeds = old.embeddeds - assert(old.complete) // otherwise we are copying incomplete data - iface.complete = old.complete - iface.implicit = old.implicit // should be false but be conservative - underlying = iface + // If the RHS doesn't use type parameters, it may not have been + // substituted; we need to craft a new interface first. + if iface == orig.rhs() { + assert(iface.complete) // otherwise we are copying incomplete data + + crafted := check.newInterface() + crafted.complete = true + crafted.implicit = false + crafted.embeddeds = iface.embeddeds + + iface = crafted } iface.methods = methods iface.tset = nil // recompute type set with new methods - // If check != nil, check.newInterface will have saved the interface for later completion. - if check == nil { // golang/go#61561: all newly created interfaces must be fully evaluated + // go.dev/issue/61561: We have to complete the interface even without a checker. + if check == nil { iface.typeSet() } + + return iface } } - return underlying + return rhs } // safeUnderlying returns the underlying type of typ without expanding diff --git a/src/cmd/compile/internal/types2/object.go b/src/cmd/compile/internal/types2/object.go index 7096c556971..ce129dbf590 100644 --- a/src/cmd/compile/internal/types2/object.go +++ b/src/cmd/compile/internal/types2/object.go @@ -292,10 +292,11 @@ func NewTypeName(pos syntax.Pos, pkg *Package, name string, typ Type) *TypeName } // NewTypeNameLazy returns a new defined type like NewTypeName, but it -// lazily calls resolve to finish constructing the Named object. +// lazily calls unpack to finish constructing the Named object. func NewTypeNameLazy(pos syntax.Pos, pkg *Package, name string, load func(*Named) ([]*TypeParam, Type, []*Func, []func())) *TypeName { obj := NewTypeName(pos, pkg, name, nil) - NewNamed(obj, nil, nil).loader = load + n := (*Checker)(nil).newNamed(obj, nil, nil) + n.loader = load return obj } @@ -327,7 +328,7 @@ func (obj *TypeName) IsAlias() bool { } } -// A Variable represents a declared variable (including function parameters and results, and struct fields). +// A Var represents a declared variable (including function parameters and results, and struct fields). type Var struct { object origin *Var // if non-nil, the Var from which this one was instantiated @@ -638,7 +639,7 @@ func writeObject(buf *bytes.Buffer, obj Object, qf Qualifier) { } else { // TODO(gri) should this be fromRHS for *Named? // (See discussion in #66559.) - typ = under(typ) + typ = typ.Underlying() } } diff --git a/src/cmd/compile/internal/types2/operand.go b/src/cmd/compile/internal/types2/operand.go index 81f46af5351..cd9e9f3575f 100644 --- a/src/cmd/compile/internal/types2/operand.go +++ b/src/cmd/compile/internal/types2/operand.go @@ -194,7 +194,7 @@ func operandString(x *operand, qf Qualifier) string { what := compositeKind(x.typ) if what == "" { // x.typ must be basic type - what = under(x.typ).(*Basic).name + what = x.typ.Underlying().(*Basic).name } desc += what + " " } @@ -229,7 +229,7 @@ func operandString(x *operand, qf Qualifier) string { // ("array", "slice", etc.) or the empty string if typ is not // composite but a basic type. 
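[Editorial note: the effect of expandRHS is observable through the public go/types API: instantiating a generic named type substitutes the type arguments into its declared structure. A small sketch that demonstrates the behavior from the outside, not this internal code path:]

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
		"go/token"
		"go/types"
	)

	const src = "package p; type T[P any] struct{ f P }"

	func main() {
		fset := token.NewFileSet()
		f, err := parser.ParseFile(fset, "p.go", src, 0)
		if err != nil {
			panic(err)
		}
		pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
		if err != nil {
			panic(err)
		}
		T := pkg.Scope().Lookup("T").Type()
		inst, err := types.Instantiate(types.NewContext(), T, []types.Type{types.Typ[types.Int]}, true)
		if err != nil {
			panic(err)
		}
		fmt.Println(inst)              // p.T[int]
		fmt.Println(inst.Underlying()) // struct{f int}
	}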
func compositeKind(typ Type) string { - switch under(typ).(type) { + switch typ.Underlying().(type) { case *Basic: return "" case *Array: @@ -319,8 +319,8 @@ func (x *operand) assignableTo(check *Checker, T Type, cause *string) (bool, Cod return true, 0 } - Vu := under(V) - Tu := under(T) + Vu := V.Underlying() + Tu := T.Underlying() Vp, _ := V.(*TypeParam) Tp, _ := T.(*TypeParam) diff --git a/src/cmd/compile/internal/types2/predicates.go b/src/cmd/compile/internal/types2/predicates.go index c157672ba58..b0578c2991c 100644 --- a/src/cmd/compile/internal/types2/predicates.go +++ b/src/cmd/compile/internal/types2/predicates.go @@ -28,11 +28,11 @@ func isString(t Type) bool { return isBasic(t, IsString) } func isIntegerOrFloat(t Type) bool { return isBasic(t, IsInteger|IsFloat) } func isConstType(t Type) bool { return isBasic(t, IsConstType) } -// isBasic reports whether under(t) is a basic type with the specified info. +// isBasic reports whether t.Underlying() is a basic type with the specified info. // If t is a type parameter the result is false; i.e., // isBasic does not look inside a type parameter. func isBasic(t Type, info BasicInfo) bool { - u, _ := under(t).(*Basic) + u, _ := t.Underlying().(*Basic) return u != nil && u.info&info != 0 } @@ -48,7 +48,7 @@ func allString(t Type) bool { return allBasic(t, IsString) } func allOrdered(t Type) bool { return allBasic(t, IsOrdered) } func allNumericOrString(t Type) bool { return allBasic(t, IsNumeric|IsString) } -// allBasic reports whether under(t) is a basic type with the specified info. +// allBasic reports whether t.Underlying() is a basic type with the specified info. // If t is a type parameter, the result is true if isBasic(t, info) is true // for all specific types of the type parameter's type set. func allBasic(t Type, info BasicInfo) bool { @@ -85,7 +85,7 @@ func isTypeLit(t Type) bool { // Safe to call from types that are not fully set up. func isTyped(t Type) bool { // Alias and named types cannot denote untyped types - // so there's no need to call Unalias or under, below. + // so there's no need to call Unalias or Underlying, below. b, _ := t.(*Basic) return b == nil || b.info&IsUntyped == 0 } @@ -100,14 +100,14 @@ func isUntyped(t Type) bool { // Safe to call from types that are not fully set up. func isUntypedNumeric(t Type) bool { // Alias and named types cannot denote untyped types - // so there's no need to call Unalias or under, below. + // so there's no need to call Unalias or Underlying, below. b, _ := t.(*Basic) return b != nil && b.info&IsUntyped != 0 && b.info&IsNumeric != 0 } // IsInterface reports whether t is an interface type. func IsInterface(t Type) bool { - _, ok := under(t).(*Interface) + _, ok := t.Underlying().(*Interface) return ok } @@ -163,7 +163,7 @@ func comparableType(T Type, dynamic bool, seen map[Type]bool) *typeError { } seen[T] = true - switch t := under(T).(type) { + switch t := T.Underlying().(type) { case *Basic: // assume invalid types to be comparable to avoid follow-up errors if t.kind == UntypedNil { @@ -206,7 +206,7 @@ func comparableType(T Type, dynamic bool, seen map[Type]bool) *typeError { // hasNil reports whether type t includes the nil value. func hasNil(t Type) bool { - switch u := under(t).(type) { + switch u := t.Underlying().(type) { case *Basic: return u.kind == UnsafePointer case *Slice, *Pointer, *Signature, *Map, *Chan: @@ -519,7 +519,7 @@ func identicalInstance(xorig Type, xargs []Type, yorig Type, yargs []Type) bool // for untyped nil is untyped nil. 
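[Editorial note: the predicate rewrites in this file replace the package-private under(t) helper with a direct t.Underlying() call but keep the same shape. A standalone sketch of that shape using the public API; isString here is an illustrative helper, not the unexported types2 function:]

	package main

	import (
		"fmt"
		"go/token"
		"go/types"
	)

	// isString reports whether t's underlying type is a string basic type.
	func isString(t types.Type) bool {
		b, ok := t.Underlying().(*types.Basic)
		return ok && b.Info()&types.IsString != 0
	}

	func main() {
		obj := types.NewTypeName(token.NoPos, nil, "MyString", nil)
		myString := types.NewNamed(obj, types.Typ[types.String], nil)
		fmt.Println(isString(myString))             // true
		fmt.Println(isString(types.Typ[types.Int])) // false
	}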
func Default(t Type) Type { // Alias and named types cannot denote untyped types - // so there's no need to call Unalias or under, below. + // so there's no need to call Unalias or Underlying, below. if t, _ := t.(*Basic); t != nil { switch t.kind { case UntypedBool: diff --git a/src/cmd/compile/internal/types2/range.go b/src/cmd/compile/internal/types2/range.go index b654601eafc..899f5c09911 100644 --- a/src/cmd/compile/internal/types2/range.go +++ b/src/cmd/compile/internal/types2/range.go @@ -35,7 +35,7 @@ func (check *Checker) rangeStmt(inner stmtContext, rangeStmt *syntax.ForStmt, no check.expr(nil, &x, rangeVar) if isTypes2 && x.mode != invalid && sValue == nil && !check.hasCallOrRecv { - if t, ok := arrayPtrDeref(under(x.typ)).(*Array); ok { + if t, ok := arrayPtrDeref(x.typ.Underlying()).(*Array); ok { for { // Put constant info on the thing inside parentheses. // That's where (*../noder/writer).expr expects it. diff --git a/src/cmd/compile/internal/types2/resolver.go b/src/cmd/compile/internal/types2/resolver.go index 9d8769b96f7..4c9eeb329c8 100644 --- a/src/cmd/compile/internal/types2/resolver.go +++ b/src/cmd/compile/internal/types2/resolver.go @@ -508,6 +508,19 @@ func (check *Checker) collectObjects() { } } +// sortObjects sorts package-level objects by source-order for reproducible processing +func (check *Checker) sortObjects() { + check.objList = make([]Object, len(check.objMap)) + i := 0 + for obj := range check.objMap { + check.objList[i] = obj + i++ + } + slices.SortFunc(check.objList, func(a, b Object) int { + return cmp.Compare(a.order(), b.order()) + }) +} + // unpackRecv unpacks a receiver type expression and returns its components: ptr indicates // whether rtyp is a pointer receiver, base is the receiver base type expression stripped // of its type parameters (if any), and tparams are its type parameter names, if any. The @@ -626,19 +639,8 @@ func (check *Checker) resolveBaseTypeName(ptr bool, name *syntax.Name) (ptr_ boo // packageObjects typechecks all package objects, but not function bodies. func (check *Checker) packageObjects() { - // process package objects in source order for reproducible results - objList := make([]Object, len(check.objMap)) - i := 0 - for obj := range check.objMap { - objList[i] = obj - i++ - } - slices.SortFunc(objList, func(a, b Object) int { - return cmp.Compare(a.order(), b.order()) - }) - // add new methods to already type-checked types (from a prior Checker.Files call) - for _, obj := range objList { + for _, obj := range check.objList { if obj, _ := obj.(*TypeName); obj != nil && obj.typ != nil { check.collectMethods(obj) } @@ -661,7 +663,7 @@ func (check *Checker) packageObjects() { // its Type is Invalid. // // Investigate and reenable this branch. 
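[Editorial note: the sortObjects helper factored out above is the standard map-iteration-determinism idiom: map iteration order is randomized, so collect the keys and sort by a stable ordinal. A runnable sketch with an invented object type:]

	package main

	import (
		"cmp"
		"fmt"
		"slices"
	)

	type object struct {
		name  string
		order uint32 // source order
	}

	func main() {
		objMap := map[*object]bool{
			{"b", 2}: true,
			{"a", 1}: true,
			{"c", 3}: true,
		}
		// Collect the keys and sort for reproducible processing,
		// as sortObjects does.
		objList := make([]*object, 0, len(objMap))
		for obj := range objMap {
			objList = append(objList, obj)
		}
		slices.SortFunc(objList, func(x, y *object) int {
			return cmp.Compare(x.order, y.order)
		})
		for _, obj := range objList {
			fmt.Println(obj.name) // a, then b, then c
		}
	}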
- for _, obj := range objList { + for _, obj := range check.objList { check.objDecl(obj, nil) } } else { @@ -673,7 +675,7 @@ func (check *Checker) packageObjects() { var aliasList []*TypeName var othersList []Object // everything that's not a type // phase 1: non-alias type declarations - for _, obj := range objList { + for _, obj := range check.objList { if tname, _ := obj.(*TypeName); tname != nil { if check.objMap[tname].tdecl.Alias { aliasList = append(aliasList, tname) diff --git a/src/cmd/compile/internal/types2/signature.go b/src/cmd/compile/internal/types2/signature.go index ea1cfd88cc6..ea60254fa68 100644 --- a/src/cmd/compile/internal/types2/signature.go +++ b/src/cmd/compile/internal/types2/signature.go @@ -203,7 +203,7 @@ func (check *Checker) collectRecv(rparam *syntax.Field, scopePos syntax.Pos) (*V case *Alias: // Methods on generic aliases are not permitted. // Only report an error if the alias type is valid. - if isValid(unalias(t)) { + if isValid(t) { check.errorf(rbase, InvalidRecv, "cannot define new methods on generic alias type %s", t) } // Ok to continue but do not set basetype in this case so that @@ -439,7 +439,7 @@ func (check *Checker) validRecv(pos poser, recv *Var) { break } var cause string - switch u := T.under().(type) { + switch u := T.Underlying().(type) { case *Basic: // unsafe.Pointer is treated like a regular pointer if u.kind == UnsafePointer { diff --git a/src/cmd/compile/internal/types2/sizeof_test.go b/src/cmd/compile/internal/types2/sizeof_test.go index d435c049c5b..092e82318a3 100644 --- a/src/cmd/compile/internal/types2/sizeof_test.go +++ b/src/cmd/compile/internal/types2/sizeof_test.go @@ -15,9 +15,9 @@ func TestSizeof(t *testing.T) { const _64bit = ^uint(0)>>32 != 0 var tests = []struct { - val interface{} // type as a value - _32bit uintptr // size on 32bit platforms - _64bit uintptr // size on 64bit platforms + val any // type as a value + _32bit uintptr // size on 32bit platforms + _64bit uintptr // size on 64bit platforms }{ // Types {Basic{}, 16, 32}, @@ -31,7 +31,7 @@ func TestSizeof(t *testing.T) { {Interface{}, 40, 80}, {Map{}, 16, 32}, {Chan{}, 12, 24}, - {Named{}, 60, 112}, + {Named{}, 64, 120}, {TypeParam{}, 28, 48}, {term{}, 12, 24}, diff --git a/src/cmd/compile/internal/types2/sizes.go b/src/cmd/compile/internal/types2/sizes.go index 7b1c00b40ab..534ecfba35c 100644 --- a/src/cmd/compile/internal/types2/sizes.go +++ b/src/cmd/compile/internal/types2/sizes.go @@ -54,7 +54,7 @@ func (s *StdSizes) Alignof(T Type) (result int64) { // For arrays and structs, alignment is defined in terms // of alignment of the elements and fields, respectively. - switch t := under(T).(type) { + switch t := T.Underlying().(type) { case *Array: // spec: "For a variable x of array type: unsafe.Alignof(x) // is the same as unsafe.Alignof(x[0]), but at least 1." 
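[Editorial note: the Alignof/Sizeof hunks spell out that layout is computed from the underlying type. For orientation, a small public-API example of the same computations; the "gc"/"amd64" arguments are just one common configuration:]

	package main

	import (
		"fmt"
		"go/token"
		"go/types"
	)

	func main() {
		sizes := types.SizesFor("gc", "amd64")
		fields := []*types.Var{
			types.NewField(token.NoPos, nil, "a", types.Typ[types.Int8], false),
			types.NewField(token.NoPos, nil, "b", types.Typ[types.Int64], false),
		}
		s := types.NewStruct(fields, nil)
		// struct{a int8; b int64}: offsets 0 and 8, size 16, alignment 8.
		fmt.Println(sizes.Sizeof(s), sizes.Alignof(s)) // 16 8
	}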
@@ -162,7 +162,7 @@ var basicSizes = [...]byte{ } func (s *StdSizes) Sizeof(T Type) int64 { - switch t := under(T).(type) { + switch t := T.Underlying().(type) { case *Basic: assert(isTyped(T)) k := t.kind @@ -307,7 +307,7 @@ func (conf *Config) offsetsof(T *Struct) []int64 { func (conf *Config) offsetof(T Type, index []int) int64 { var offs int64 for _, i := range index { - s := under(T).(*Struct) + s := T.Underlying().(*Struct) d := conf.offsetsof(s)[i] if d < 0 { return -1 diff --git a/src/cmd/compile/internal/types2/stdlib_test.go b/src/cmd/compile/internal/types2/stdlib_test.go index 26d2eb23abe..ad1974ad85f 100644 --- a/src/cmd/compile/internal/types2/stdlib_test.go +++ b/src/cmd/compile/internal/types2/stdlib_test.go @@ -462,7 +462,7 @@ func pkgFilenames(dir string, includeTest bool) ([]string, error) { return filenames, nil } -func walkPkgDirs(dir string, pkgh func(dir string, filenames []string), errh func(args ...interface{})) { +func walkPkgDirs(dir string, pkgh func(dir string, filenames []string), errh func(args ...any)) { w := walker{pkgh, errh} w.walk(dir) } diff --git a/src/cmd/compile/internal/types2/stmt.go b/src/cmd/compile/internal/types2/stmt.go index efe9c99d876..47ca4d90ec7 100644 --- a/src/cmd/compile/internal/types2/stmt.go +++ b/src/cmd/compile/internal/types2/stmt.go @@ -192,7 +192,7 @@ func (check *Checker) suspendedCall(keyword string, call syntax.Expr) { } // goVal returns the Go value for val, or nil. -func goVal(val constant.Value) interface{} { +func goVal(val constant.Value) any { // val should exist, but be conservative and check if val == nil { return nil @@ -226,7 +226,7 @@ func goVal(val constant.Value) interface{} { // types we need to also check the value's types (e.g., byte(1) vs myByte(1)) // when the switch expression is of interface type. type ( - valueMap map[interface{}][]valueType // underlying Go value -> valueType + valueMap map[any][]valueType // underlying Go value -> valueType valueType struct { pos syntax.Pos typ Type diff --git a/src/cmd/compile/internal/types2/struct.go b/src/cmd/compile/internal/types2/struct.go index f5cdc472f77..99b75332e8f 100644 --- a/src/cmd/compile/internal/types2/struct.go +++ b/src/cmd/compile/internal/types2/struct.go @@ -141,12 +141,12 @@ func (check *Checker) structType(styp *Struct, e *syntax.StructType) { // Because we have a name, typ must be of the form T or *T, where T is the name // of a (named or alias) type, and t (= deref(typ)) must be the type of T. // We must delay this check to the end because we don't want to instantiate - // (via under(t)) a possibly incomplete type. + // (via t.Underlying()) a possibly incomplete type. 
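[Editorial note: the goVal/valueMap machinery touched above reduces a constant.Value to a comparable Go value so duplicate switch cases can be detected with a map. A simplified sketch covering just two constant kinds; the real function handles more:]

	package main

	import (
		"fmt"
		"go/constant"
		"go/token"
	)

	// goVal returns a comparable Go value for val, or nil
	// (only when the conversion is exact).
	func goVal(v constant.Value) any {
		switch v.Kind() {
		case constant.Int:
			if i, exact := constant.Int64Val(v); exact {
				return i
			}
		case constant.String:
			return constant.StringVal(v)
		}
		return nil
	}

	func main() {
		a := constant.MakeInt64(3)
		b := constant.BinaryOp(constant.MakeInt64(1), token.ADD, constant.MakeInt64(2))
		seen := map[any]bool{}
		for _, v := range []constant.Value{a, b} {
			if k := goVal(v); seen[k] {
				fmt.Println("duplicate case:", v) // duplicate case: 3
			} else {
				seen[k] = true
			}
		}
	}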
embeddedTyp := typ // for closure below embeddedPos := pos check.later(func() { t, isPtr := deref(embeddedTyp) - switch u := under(t).(type) { + switch u := t.Underlying().(type) { case *Basic: if !isValid(t) { // error was reported before diff --git a/src/cmd/compile/internal/types2/typeparam.go b/src/cmd/compile/internal/types2/typeparam.go index c60b5eb4172..2ffa00c32c7 100644 --- a/src/cmd/compile/internal/types2/typeparam.go +++ b/src/cmd/compile/internal/types2/typeparam.go @@ -113,7 +113,7 @@ func (t *TypeParam) iface() *Interface { // determine constraint interface var ityp *Interface - switch u := under(bound).(type) { + switch u := bound.Underlying().(type) { case *Basic: if !isValid(u) { // error is reported elsewhere diff --git a/src/cmd/compile/internal/types2/typeset.go b/src/cmd/compile/internal/types2/typeset.go index ce487e74f72..fafe6f368ba 100644 --- a/src/cmd/compile/internal/types2/typeset.go +++ b/src/cmd/compile/internal/types2/typeset.go @@ -114,13 +114,13 @@ func (s *_TypeSet) all(f func(t, u Type) bool) bool { for _, t := range s.terms { assert(t.typ != nil) - // Unalias(x) == under(x) for ~x terms + // Unalias(x) == x.Underlying() for ~x terms u := Unalias(t.typ) if !t.tilde { - u = under(u) + u = u.Underlying() } if debug { - assert(Identical(u, under(u))) + assert(Identical(u, u.Underlying())) } if !f(t.typ, u) { return false @@ -264,7 +264,7 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_ } var comparable bool var terms termlist - switch u := under(typ).(type) { + switch u := typ.Underlying().(type) { case *Interface: // For now we don't permit type parameters as constraints. assert(!isTypeParam(typ)) @@ -380,7 +380,7 @@ func computeUnionTypeSet(check *Checker, unionSets map[*Union]*_TypeSet, pos syn var allTerms termlist for _, t := range utyp.terms { var terms termlist - u := under(t.typ) + u := t.typ.Underlying() if ui, _ := u.(*Interface); ui != nil { // For now we don't permit type parameters as constraints. assert(!isTypeParam(t.typ)) diff --git a/src/cmd/compile/internal/types2/typeset_test.go b/src/cmd/compile/internal/types2/typeset_test.go index 40ca28e525f..bcff2489306 100644 --- a/src/cmd/compile/internal/types2/typeset_test.go +++ b/src/cmd/compile/internal/types2/typeset_test.go @@ -64,7 +64,7 @@ func TestTypeSetString(t *testing.T) { if obj == nil { t.Fatalf("%s: T not found (invalid test case)", body) } - T, ok := under(obj.Type()).(*Interface) + T, ok := obj.Type().Underlying().(*Interface) if !ok { t.Fatalf("%s: %v is not an interface (invalid test case)", body, obj) } diff --git a/src/cmd/compile/internal/types2/typestring.go b/src/cmd/compile/internal/types2/typestring.go index 47f53bc12d7..b1f0d0929ba 100644 --- a/src/cmd/compile/internal/types2/typestring.go +++ b/src/cmd/compile/internal/types2/typestring.go @@ -455,7 +455,7 @@ func (w *typeWriter) tuple(tup *Tuple, variadic bool) { } else { // special case: // append(s, "foo"...) leads to signature func([]byte, string...) 
- if t, _ := under(typ).(*Basic); t == nil || t.kind != String { + if t, _ := typ.Underlying().(*Basic); t == nil || t.kind != String { w.error("expected string type") continue } diff --git a/src/cmd/compile/internal/types2/typeterm.go b/src/cmd/compile/internal/types2/typeterm.go index 97791324e1e..cb11811d458 100644 --- a/src/cmd/compile/internal/types2/typeterm.go +++ b/src/cmd/compile/internal/types2/typeterm.go @@ -115,7 +115,7 @@ func (x *term) includes(t Type) bool { u := t if x.tilde { - u = under(u) + u = u.Underlying() } return Identical(x.typ, u) } @@ -155,11 +155,11 @@ func (x *term) disjoint(y *term) bool { } ux := x.typ if y.tilde { - ux = under(ux) + ux = ux.Underlying() } uy := y.typ if x.tilde { - uy = under(uy) + uy = uy.Underlying() } return !Identical(ux, uy) } diff --git a/src/cmd/compile/internal/types2/typexpr.go b/src/cmd/compile/internal/types2/typexpr.go index 8accc46751f..8601ce62776 100644 --- a/src/cmd/compile/internal/types2/typexpr.go +++ b/src/cmd/compile/internal/types2/typexpr.go @@ -169,11 +169,11 @@ func (check *Checker) validVarType(e syntax.Expr, typ Type) { return } - // We don't want to call under() or complete interfaces while we are in + // We don't want to call typ.Underlying() or complete interfaces while we are in // the middle of type-checking parameter declarations that might belong // to interface methods. Delay this check to the end of type-checking. check.later(func() { - if t, _ := under(typ).(*Interface); t != nil { + if t, _ := typ.Underlying().(*Interface); t != nil { pos := syntax.StartPos(e) tset := computeInterfaceTypeSet(check, pos, t) // TODO(gri) is this the correct position? if !tset.IsMethodSet() { @@ -239,7 +239,7 @@ func (check *Checker) typInternal(e0 syntax.Expr, def *TypeName) (T Type) { check.indent-- var under Type if T != nil { - // Calling under() here may lead to endless instantiations. + // Calling T.Underlying() here may lead to endless instantiations. // Test case: type T[P any] *T[P] under = safeUnderlying(T) } @@ -425,7 +425,7 @@ func setDefType(def *TypeName, typ Type) { case *Basic: assert(t == Typ[Invalid]) case *Named: - t.underlying = typ + t.fromRHS = typ default: panic(fmt.Sprintf("unexpected type %T", t)) } diff --git a/src/cmd/compile/internal/types2/under.go b/src/cmd/compile/internal/types2/under.go index 078ba9ab172..98c62733c7c 100644 --- a/src/cmd/compile/internal/types2/under.go +++ b/src/cmd/compile/internal/types2/under.go @@ -6,19 +6,8 @@ package types2 import "iter" -// under returns the true expanded underlying type. -// If it doesn't exist, the result is Typ[Invalid]. -// under must only be called when a type is known -// to be fully set up. -func under(t Type) Type { - if t := asNamed(t); t != nil { - return t.under() - } - return t.Underlying() -} - // If typ is a type parameter, underIs returns the result of typ.underIs(f). -// Otherwise, underIs returns the result of f(under(typ)). +// Otherwise, underIs returns the result of f(typ.Underlying()). 
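[Editorial note: the typeterm changes preserve the ~T semantics: a tilde term matches any type whose underlying type is T, while a plain term matches only T itself. A public-API model of includes; the term type here is a local stand-in for the unexported one above:]

	package main

	import (
		"fmt"
		"go/token"
		"go/types"
	)

	type term struct {
		tilde bool
		typ   types.Type
	}

	// includes reports whether t is matched by this term.
	func (x term) includes(t types.Type) bool {
		u := t
		if x.tilde {
			u = u.Underlying()
		}
		return types.Identical(x.typ, u)
	}

	func main() {
		obj := types.NewTypeName(token.NoPos, nil, "MyInt", nil)
		myInt := types.NewNamed(obj, types.Typ[types.Int], nil)

		fmt.Println(term{false, types.Typ[types.Int]}.includes(myInt)) // false: int != MyInt
		fmt.Println(term{true, types.Typ[types.Int]}.includes(myInt))  // true: ~int matches MyInt
	}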
func underIs(typ Type, f func(Type) bool) bool { return all(typ, func(_, u Type) bool { return f(u) @@ -31,7 +20,7 @@ func all(t Type, f func(t, u Type) bool) bool { if p, _ := Unalias(t).(*TypeParam); p != nil { return p.typeset(f) } - return f(t, under(t)) + return f(t, t.Underlying()) } // typeset is an iterator over the (type/underlying type) pairs of the diff --git a/src/cmd/compile/internal/types2/unify.go b/src/cmd/compile/internal/types2/unify.go index 9cd3af86071..7250d82478e 100644 --- a/src/cmd/compile/internal/types2/unify.go +++ b/src/cmd/compile/internal/types2/unify.go @@ -141,7 +141,7 @@ func (u *unifier) unify(x, y Type, mode unifyMode) bool { return u.nify(x, y, mode, nil) } -func (u *unifier) tracef(format string, args ...interface{}) { +func (u *unifier) tracef(format string, args ...any) { fmt.Println(strings.Repeat(". ", u.depth) + sprintf(nil, true, format, args...)) } @@ -270,7 +270,7 @@ func (u *unifier) inferred(tparams []*TypeParam) []Type { // it is a non-type parameter interface. Otherwise it returns nil. func asInterface(x Type) (i *Interface) { if _, ok := Unalias(x).(*TypeParam); !ok { - i, _ = under(x).(*Interface) + i, _ = x.Underlying().(*Interface) } return i } @@ -339,7 +339,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) { if traceInference { u.tracef("%s ≡ under %s", x, ny) } - y = ny.under() + y = ny.Underlying() // Per the spec, a defined type cannot have an underlying type // that is a type parameter. assert(!isTypeParam(y)) @@ -430,7 +430,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) { u.set(px, y) default: // Neither x nor y are defined types. - if yc, _ := under(y).(*Chan); yc != nil && yc.dir != SendRecv { + if yc, _ := y.Underlying().(*Chan); yc != nil && yc.dir != SendRecv { // y is a directed channel type: select y. u.set(px, y) } @@ -779,7 +779,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) { } // If y is a defined type, it may not match against cx which // is an underlying type (incl. int, string, etc.). Use assign - // mode here so that the unifier automatically takes under(y) + // mode here so that the unifier automatically uses y.Underlying() // if necessary. 
return u.nify(cx, yorig, assign, p) } diff --git a/src/cmd/compile/internal/types2/union.go b/src/cmd/compile/internal/types2/union.go index 1bf4353f264..ab0bd43cd35 100644 --- a/src/cmd/compile/internal/types2/union.go +++ b/src/cmd/compile/internal/types2/union.go @@ -93,7 +93,7 @@ func parseUnion(check *Checker, uexpr syntax.Expr) Type { continue } - u := under(t.typ) + u := t.typ.Underlying() f, _ := u.(*Interface) if t.tilde { if f != nil { diff --git a/src/cmd/compile/internal/types2/universe.go b/src/cmd/compile/internal/types2/universe.go index c66caebd10c..332cd174f97 100644 --- a/src/cmd/compile/internal/types2/universe.go +++ b/src/cmd/compile/internal/types2/universe.go @@ -116,7 +116,7 @@ func defPredeclaredTypes() { { obj := NewTypeName(nopos, nil, "error", nil) obj.setColor(black) - typ := NewNamed(obj, nil, nil) + typ := (*Checker)(nil).newNamed(obj, nil, nil) // error.Error() string recv := newVar(RecvVar, nopos, nil, "", typ) @@ -128,7 +128,8 @@ func defPredeclaredTypes() { ityp := &Interface{methods: []*Func{err}, complete: true} computeInterfaceTypeSet(nil, nopos, ityp) // prevent races due to lazy computation of tset - typ.SetUnderlying(ityp) + typ.fromRHS = ityp + typ.Underlying() def(obj) } @@ -136,12 +137,13 @@ func defPredeclaredTypes() { { obj := NewTypeName(nopos, nil, "comparable", nil) obj.setColor(black) - typ := NewNamed(obj, nil, nil) + typ := (*Checker)(nil).newNamed(obj, nil, nil) // interface{} // marked as comparable ityp := &Interface{complete: true, tset: &_TypeSet{nil, allTermlist, true}} - typ.SetUnderlying(ityp) + typ.fromRHS = ityp + typ.Underlying() def(obj) } } diff --git a/src/cmd/compile/internal/types2/validtype.go b/src/cmd/compile/internal/types2/validtype.go index 32e389a6562..bec6412f861 100644 --- a/src/cmd/compile/internal/types2/validtype.go +++ b/src/cmd/compile/internal/types2/validtype.go @@ -91,13 +91,6 @@ func (check *Checker) validType0(pos syntax.Pos, typ Type, nest, path []*Named) // break // } - // Don't report a 2nd error if we already know the type is invalid - // (e.g., if a cycle was detected earlier, via under). - // Note: ensure that t.orig is fully resolved by calling Underlying(). - if !isValid(t.Underlying()) { - return false - } - // If the current type t is also found in nest, (the memory of) t is // embedded in itself, indicating an invalid recursive type. for _, e := range nest { @@ -125,8 +118,9 @@ func (check *Checker) validType0(pos syntax.Pos, typ Type, nest, path []*Named) // are not yet available to other goroutines). assert(t.obj.pkg == check.pkg) assert(t.Origin().obj.pkg == check.pkg) - t.underlying = Typ[Invalid] - t.Origin().underlying = Typ[Invalid] + + // let t become invalid when it is unpacked + t.Origin().fromRHS = Typ[Invalid] // Find the starting point of the cycle and report it. // Because each type in nest must also appear in path (see invariant below), @@ -147,7 +141,8 @@ func (check *Checker) validType0(pos syntax.Pos, typ Type, nest, path []*Named) // Every type added to nest is also added to path; thus every type that is in nest // must also be in path (invariant). But not every type in path is in nest, since // nest may be pruned (see below, *TypeParam case). 
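[Editorial note: the validtype change now invalidates the origin's RHS when a recursive type is detected, so the cycle is broken when the type is unpacked. From the outside the observable behavior is unchanged: type-checking such a declaration fails. A small public-API demonstration; the exact error wording is approximate:]

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
		"go/token"
		"go/types"
	)

	const src = "package p; type T struct{ x T }"

	func main() {
		fset := token.NewFileSet()
		f, err := parser.ParseFile(fset, "p.go", src, 0)
		if err != nil {
			panic(err)
		}
		_, err = (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
		fmt.Println(err) // reports an invalid recursive type for T
	}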
- if !check.validType0(pos, t.Origin().fromRHS, append(nest, t), append(path, t)) { + t.Origin().unpack() + if !check.validType0(pos, t.Origin().rhs(), append(nest, t), append(path, t)) { return false } diff --git a/src/cmd/compile/internal/types2/version.go b/src/cmd/compile/internal/types2/version.go index 765b0f7e9ab..512285055c6 100644 --- a/src/cmd/compile/internal/types2/version.go +++ b/src/cmd/compile/internal/types2/version.go @@ -58,7 +58,7 @@ func (check *Checker) allowVersion(want goVersion) bool { // verifyVersionf is like allowVersion but also accepts a format string and arguments // which are used to report a version error if allowVersion returns false. -func (check *Checker) verifyVersionf(at poser, v goVersion, format string, args ...interface{}) bool { +func (check *Checker) verifyVersionf(at poser, v goVersion, format string, args ...any) bool { if !check.allowVersion(v) { check.versionErrorf(at, v, format, args...) return false diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go index 974eb06886f..2f2a2c62f16 100644 --- a/src/cmd/compile/internal/walk/builtin.go +++ b/src/cmd/compile/internal/walk/builtin.go @@ -714,10 +714,14 @@ func walkPrint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { } case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64: on = typecheck.LookupRuntime("printint") - case types.TFLOAT32, types.TFLOAT64: - on = typecheck.LookupRuntime("printfloat") - case types.TCOMPLEX64, types.TCOMPLEX128: - on = typecheck.LookupRuntime("printcomplex") + case types.TFLOAT32: + on = typecheck.LookupRuntime("printfloat32") + case types.TFLOAT64: + on = typecheck.LookupRuntime("printfloat64") + case types.TCOMPLEX64: + on = typecheck.LookupRuntime("printcomplex64") + case types.TCOMPLEX128: + on = typecheck.LookupRuntime("printcomplex128") case types.TBOOL: on = typecheck.LookupRuntime("printbool") case types.TSTRING: diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index b9e226b2074..989ae0a1db2 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -704,27 +704,21 @@ func walkDivMod(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { // runtime calls late in SSA processing. if types.RegSize < 8 && (et == types.TINT64 || et == types.TUINT64) { if n.Y.Op() == ir.OLITERAL { - // Leave div/mod by constant powers of 2 or small 16-bit constants. + // Leave div/mod by non-zero uint64 constants. // The SSA backend will handle those. + // (Zero constants should have been rejected already, but we check just in case.) switch et { case types.TINT64: - c := ir.Int64Val(n.Y) - if c < 0 { - c = -c - } - if c != 0 && c&(c-1) == 0 { + if ir.Int64Val(n.Y) != 0 { return n } case types.TUINT64: - c := ir.Uint64Val(n.Y) - if c < 1<<16 { - return n - } - if c != 0 && c&(c-1) == 0 { + if ir.Uint64Val(n.Y) != 0 { return n } } } + // Build call to uint64div, uint64mod, int64div, or int64mod. var fn string if et == types.TINT64 { fn = "int64" diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go index d0aad088496..348880f622f 100644 --- a/src/cmd/compile/internal/x86/ssa.go +++ b/src/cmd/compile/internal/x86/ssa.go @@ -167,7 +167,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssa.Op386SBBL: opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg()) - case ssa.Op386ADDLcarry, ssa.Op386SUBLcarry: + case ssa.Op386ADDLcarry, ssa.Op386ADCLcarry, ssa.Op386SUBLcarry: // output 0 is carry/borrow, output 1 is the low 32 bits. 
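[Editorial note: the walkDivMod hunk above drops the old filter that kept only power-of-two (or small) constant divisors for the SSA backend; any non-zero 64-bit constant divisor is now left for SSA on 32-bit targets. For reference, this is the classic power-of-two test the removed code used (ignoring the math.MinInt64 edge case, as the original did):]

	package main

	import "fmt"

	// isPowerOfTwo is the predicate the old filter applied to the divisor.
	func isPowerOfTwo(c int64) bool {
		if c < 0 {
			c = -c
		}
		return c != 0 && c&(c-1) == 0
	}

	func main() {
		for _, c := range []int64{0, 1, 3, 8, -16, 1000} {
			fmt.Printf("%5d: pow2=%v\n", c, isPowerOfTwo(c))
		}
	}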
opregreg(s, v.Op.Asm(), v.Reg0(), v.Args[1].Reg()) diff --git a/src/cmd/covdata/covdata.go b/src/cmd/covdata/covdata.go index 122ad28b5ca..b88b81799e6 100644 --- a/src/cmd/covdata/covdata.go +++ b/src/cmd/covdata/covdata.go @@ -42,14 +42,14 @@ func Exit(code int) { os.Exit(code) } -func dbgtrace(vlevel int, s string, a ...interface{}) { +func dbgtrace(vlevel int, s string, a ...any) { if *verbflag >= vlevel { fmt.Printf(s, a...) fmt.Printf("\n") } } -func warn(s string, a ...interface{}) { +func warn(s string, a ...any) { fmt.Fprintf(os.Stderr, "warning: ") fmt.Fprintf(os.Stderr, s, a...) fmt.Fprintf(os.Stderr, "\n") @@ -58,7 +58,7 @@ func warn(s string, a ...interface{}) { } } -func fatal(s string, a ...interface{}) { +func fatal(s string, a ...any) { fmt.Fprintf(os.Stderr, "error: ") fmt.Fprintf(os.Stderr, s, a...) fmt.Fprintf(os.Stderr, "\n") diff --git a/src/cmd/dist/build.go b/src/cmd/dist/build.go index 9a7951726f6..2b382a1c025 100644 --- a/src/cmd/dist/build.go +++ b/src/cmd/dist/build.go @@ -624,11 +624,6 @@ func mustLinkExternal(goos, goarch string, cgoEnabled bool) bool { // Internally linking cgo is incomplete on some architectures. // https://golang.org/issue/14449 return true - case "arm64": - if goos == "windows" { - // windows/arm64 internal linking is not implemented. - return true - } case "ppc64": // Big Endian PPC64 cgo internal linking is not implemented for aix or linux. if goos == "aix" || goos == "linux" { @@ -1397,7 +1392,7 @@ var ( binExesIncludedInDistpack = []string{"cmd/go", "cmd/gofmt"} // Keep in sync with the filter in cmd/distpack/pack.go. - toolsIncludedInDistpack = []string{"cmd/asm", "cmd/cgo", "cmd/compile", "cmd/cover", "cmd/link", "cmd/preprofile", "cmd/vet"} + toolsIncludedInDistpack = []string{"cmd/asm", "cmd/cgo", "cmd/compile", "cmd/cover", "cmd/fix", "cmd/link", "cmd/preprofile", "cmd/vet"} // We could install all tools in "cmd", but is unnecessary because we will // remove them in distpack, so instead install the tools that will actually diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go index 080de832b2a..62cd9376927 100644 --- a/src/cmd/dist/buildtool.go +++ b/src/cmd/dist/buildtool.go @@ -90,6 +90,7 @@ var bootstrapDirs = []string{ "internal/platform", "internal/profile", "internal/race", + "internal/runtime/gc", "internal/saferio", "internal/syscall/unix", "internal/types/errors", diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go index b9673564ec1..73ea5c4015a 100644 --- a/src/cmd/dist/test.go +++ b/src/cmd/dist/test.go @@ -1110,7 +1110,7 @@ func (t *tester) registerTest(heading string, test *goTest, opts ...registerTest // dirCmd constructs a Cmd intended to be run in the foreground. // The command will be run in dir, and Stdout and Stderr will go to os.Stdout // and os.Stderr. -func (t *tester) dirCmd(dir string, cmdline ...interface{}) *exec.Cmd { +func (t *tester) dirCmd(dir string, cmdline ...any) *exec.Cmd { bin, args := flattenCmdline(cmdline) cmd := exec.Command(bin, args...) if filepath.IsAbs(dir) { @@ -1128,7 +1128,7 @@ func (t *tester) dirCmd(dir string, cmdline ...interface{}) *exec.Cmd { // flattenCmdline flattens a mixture of string and []string as single list // and then interprets it as a command line: first element is binary, then args. 
-func flattenCmdline(cmdline []interface{}) (bin string, args []string) { +func flattenCmdline(cmdline []any) (bin string, args []string) { var list []string for _, x := range cmdline { switch x := x.(type) { @@ -1185,9 +1185,6 @@ func (t *tester) internalLink() bool { if goos == "ios" { return false } - if goos == "windows" && goarch == "arm64" { - return false - } // Internally linking cgo is incomplete on some architectures. // https://golang.org/issue/10373 // https://golang.org/issue/14449 @@ -1214,7 +1211,7 @@ func (t *tester) internalLinkPIE() bool { case "darwin-amd64", "darwin-arm64", "linux-amd64", "linux-arm64", "linux-loong64", "linux-ppc64le", "android-arm64", - "windows-amd64", "windows-386": + "windows-amd64", "windows-386", "windows-arm64": return true } return false diff --git a/src/cmd/dist/util.go b/src/cmd/dist/util.go index 121c2dc62cf..1109bf0efd7 100644 --- a/src/cmd/dist/util.go +++ b/src/cmd/dist/util.go @@ -21,7 +21,7 @@ import ( // pathf is fmt.Sprintf for generating paths // (on windows it turns / into \ after the printf). -func pathf(format string, args ...interface{}) string { +func pathf(format string, args ...any) string { return filepath.Clean(fmt.Sprintf(format, args...)) } @@ -324,7 +324,7 @@ func xworkdir() string { } // fatalf prints an error message to standard error and exits. -func fatalf(format string, args ...interface{}) { +func fatalf(format string, args ...any) { fmt.Fprintf(os.Stderr, "go tool dist: %s\n", fmt.Sprintf(format, args...)) dieOnce.Do(func() { close(dying) }) @@ -353,12 +353,12 @@ func xatexit(f func()) { } // xprintf prints a message to standard output. -func xprintf(format string, args ...interface{}) { +func xprintf(format string, args ...any) { fmt.Printf(format, args...) } // errprintf prints a message to standard output. -func errprintf(format string, args ...interface{}) { +func errprintf(format string, args ...any) { fmt.Fprintf(os.Stderr, format, args...) } diff --git a/src/cmd/distpack/pack.go b/src/cmd/distpack/pack.go index 6bab45f1d3d..09c3a331195 100644 --- a/src/cmd/distpack/pack.go +++ b/src/cmd/distpack/pack.go @@ -172,7 +172,7 @@ func main() { default: return false // Keep in sync with toolsIncludedInDistpack in cmd/dist/build.go. - case "asm", "cgo", "compile", "cover", "link", "preprofile", "vet": + case "asm", "cgo", "compile", "cover", "fix", "link", "preprofile", "vet": } } return true diff --git a/src/cmd/fix/doc.go b/src/cmd/fix/doc.go deleted file mode 100644 index b3d69144717..00000000000 --- a/src/cmd/fix/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Fix finds Go programs that use old APIs and rewrites them to use -newer ones. After you update to a new Go release, fix helps make -the necessary changes to your programs. - -Usage: - - go tool fix [ignored...] - -This tool is currently in transition. All its historical fixers were -long obsolete and have been removed, so it is currently a no-op. In -due course the tool will integrate with the Go analysis framework -(golang.org/x/tools/go/analysis) and run a modern suite of fix -algorithms; see https://go.dev/issue/71859. -*/ -package main diff --git a/src/cmd/fix/main.go b/src/cmd/fix/main.go index 87cc0d64146..8fc412fe29e 100644 --- a/src/cmd/fix/main.go +++ b/src/cmd/fix/main.go @@ -1,31 +1,63 @@ -// Copyright 2011 The Go Authors. All rights reserved. +// Copyright 2025 The Go Authors. 
All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+/*
+Fix is a tool executed by "go fix" to update Go programs that use old
+features of the language and library and rewrite them to use newer
+ones. After you update to a new Go release, fix helps make the
+necessary changes to your programs.
+
+See the documentation for "go fix" for how to run this command.
+You can provide an alternative tool using "go fix -fixtool=...".
+
+Run "go tool fix help" to see the list of analyzers supported by this
+program.
+
+See [golang.org/x/tools/go/analysis] for information on how to write
+an analyzer that can suggest fixes.
+*/
 package main
 
 import (
-	"flag"
-	"fmt"
-	"os"
-)
+	"cmd/internal/objabi"
+	"cmd/internal/telemetry/counter"
+	"slices"
 
-var (
-	_ = flag.Bool("diff", false, "obsolete, no effect")
-	_ = flag.String("go", "", "obsolete, no effect")
-	_ = flag.String("r", "", "obsolete, no effect")
-	_ = flag.String("force", "", "obsolete, no effect")
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/buildtag"
+	"golang.org/x/tools/go/analysis/passes/hostport"
+	"golang.org/x/tools/go/analysis/passes/inline"
+	"golang.org/x/tools/go/analysis/passes/modernize"
+	"golang.org/x/tools/go/analysis/unitchecker"
 )
 
-func usage() {
-	fmt.Fprintf(os.Stderr, "usage: go tool fix [-diff] [-r ignored] [-force ignored] ...\n")
-	flag.PrintDefaults()
-	os.Exit(2)
-}
-
 func main() {
-	flag.Usage = usage
-	flag.Parse()
+	// Keep consistent with cmd/vet/main.go!
+	counter.Open()
+	objabi.AddVersionFlag()
+	counter.Inc("fix/invocations")
 
-	os.Exit(0)
+	unitchecker.Main(suite...) // (never returns)
 }
+
+// The fix suite analyzers produce fixes that are unambiguously safe to apply,
+// even if the diagnostics might not describe actual problems.
+var suite = slices.Concat(
+	[]*analysis.Analyzer{
+		buildtag.Analyzer,
+		hostport.Analyzer,
+		inline.Analyzer,
+	},
+	modernize.Suite,
+	// TODO(adonovan): add any other vet analyzers whose fixes are always safe.
+	// Candidates to audit: sigchanyzer, printf, assign, unreachable.
+	// Many of staticcheck's analyzers would make good candidates
+	// (e.g. rewriting WriteString(fmt.Sprintf()) to Fprintf.)
+	// Rejected:
+	// - composites: some types (e.g. PointXY{1,2}) don't want field names.
+	// - timeformat: flipping MM/DD is a behavior change, but the code
+	//   could potentially be a workaround for another bug.
+	// - stringintconv: offers two fixes, user input required to choose.
+	// - fieldalignment: poor signal/noise; fix could be a regression.
+) diff --git a/src/cmd/go.mod b/src/cmd/go.mod index 017883a7870..42d510c34fe 100644 --- a/src/cmd/go.mod +++ b/src/cmd/go.mod @@ -4,18 +4,18 @@ go 1.26 require ( github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 - golang.org/x/arch v0.20.1-0.20250808194827-46ba08e3ae58 + golang.org/x/arch v0.22.1-0.20251016010524-fea4a9ec4938 golang.org/x/build v0.0.0-20250806225920-b7c66c047964 - golang.org/x/mod v0.28.0 + golang.org/x/mod v0.29.0 golang.org/x/sync v0.17.0 - golang.org/x/sys v0.36.0 - golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053 + golang.org/x/sys v0.37.0 + golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 golang.org/x/term v0.34.0 - golang.org/x/tools v0.37.1-0.20250924232827-4df13e317ce4 + golang.org/x/tools v0.38.1-0.20251015192825-7d9453ccc0f5 ) require ( github.com/ianlancetaylor/demangle v0.0.0-20250417193237-f615e6bd150b // indirect - golang.org/x/text v0.29.0 // indirect + golang.org/x/text v0.30.0 // indirect rsc.io/markdown v0.0.0-20240306144322-0bf8f97ee8ef // indirect ) diff --git a/src/cmd/go.sum b/src/cmd/go.sum index 0906ffcc605..0a09e6e401a 100644 --- a/src/cmd/go.sum +++ b/src/cmd/go.sum @@ -6,23 +6,23 @@ github.com/ianlancetaylor/demangle v0.0.0-20250417193237-f615e6bd150b h1:ogbOPx8 github.com/ianlancetaylor/demangle v0.0.0-20250417193237-f615e6bd150b/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= github.com/yuin/goldmark v1.6.0 h1:boZcn2GTjpsynOsC0iJHnBWa4Bi0qzfJjthwauItG68= github.com/yuin/goldmark v1.6.0/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -golang.org/x/arch v0.20.1-0.20250808194827-46ba08e3ae58 h1:uxPa6+/WsUfzikIAPMqpTho10y4qtYpINBurU+6NrHE= -golang.org/x/arch v0.20.1-0.20250808194827-46ba08e3ae58/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= +golang.org/x/arch v0.22.1-0.20251016010524-fea4a9ec4938 h1:VJ182b/ajNehMFRltVfCh/FR0jAH+QX6hs9zqYod/mU= +golang.org/x/arch v0.22.1-0.20251016010524-fea4a9ec4938/go.mod h1:dNHoOeKiyja7GTvF9NJS1l3Z2yntpQNzgrjh1cU103A= golang.org/x/build v0.0.0-20250806225920-b7c66c047964 h1:yRs1K51GKq7hsIO+YHJ8LsslrvwFceNPIv0tYjpcBd0= golang.org/x/build v0.0.0-20250806225920-b7c66c047964/go.mod h1:i9Vx7+aOQUpYJRxSO+OpRStVBCVL/9ccI51xblWm5WY= -golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U= -golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= -golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053 h1:dHQOQddU4YHS5gY33/6klKjq7Gp3WwMyOXGNp5nzRj8= -golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053/go.mod h1:+nZKN+XVh4LCiA9DV3ywrzN4gumyCnKjau3NGb9SGoE= +golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= +golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 h1:LvzTn0GQhWuvKH/kVRS3R3bVAsdQWI7hvfLHGgh9+lU= +golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8/go.mod h1:Pi4ztBfryZoJEkyFTI5/Ocsu2jXyDr6iSdgJiYE/uwE= golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= golang.org/x/term v0.34.0/go.mod 
h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= -golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= -golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= -golang.org/x/tools v0.37.1-0.20250924232827-4df13e317ce4 h1:IcXDtHggZZo+GzNzvVRPyNFLnOc2/Z1gg3ZVIWF2uCU= -golang.org/x/tools v0.37.1-0.20250924232827-4df13e317ce4/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= +golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= +golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= +golang.org/x/tools v0.38.1-0.20251015192825-7d9453ccc0f5 h1:cz7f45KGWAtyIrz6bm45Gc+lw8beIxBSW3EQh4Bwbg4= +golang.org/x/tools v0.38.1-0.20251015192825-7d9453ccc0f5/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= rsc.io/markdown v0.0.0-20240306144322-0bf8f97ee8ef h1:mqLYrXCXYEZOop9/Dbo6RPX11539nwiCNBb1icVPmw8= rsc.io/markdown v0.0.0-20240306144322-0bf8f97ee8ef/go.mod h1:8xcPgWmwlZONN1D9bjxtHEjrUtSEa3fakVF8iaewYKQ= diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go index 19b48f0579b..fe9b8620736 100644 --- a/src/cmd/go/alldocs.go +++ b/src/cmd/go/alldocs.go @@ -18,7 +18,7 @@ // clean remove object files and cached files // doc show documentation for package or symbol // env print Go environment information -// fix update packages to use new APIs +// fix apply fixes suggested by static checkers // fmt gofmt (reformat) package sources // generate generate Go files by processing source // get add dependencies to current module and install them @@ -495,22 +495,33 @@ // // For more about environment variables, see 'go help environment'. // -// # Update packages to use new APIs +// # Apply fixes suggested by static checkers // // Usage: // -// go fix [-fix list] [packages] +// go fix [build flags] [-fixtool prog] [fix flags] [packages] // -// Fix runs the Go fix command on the packages named by the import paths. +// Fix runs the Go fix tool (cmd/fix) on the named packages +// and applies suggested fixes. // -// The -fix flag sets a comma-separated list of fixes to run. -// The default is all known fixes. -// (Its value is passed to 'go tool fix -r'.) +// It supports these flags: +// +// -diff +// instead of applying each fix, print the patch as a unified diff +// +// The -fixtool=prog flag selects a different analysis tool with +// alternative or additional fixers; see the documentation for go vet's +// -vettool flag for details. +// +// The default fix tool is 'go tool fix' or cmd/fix. +// For help on its fixers and their flags, run 'go tool fix help'. +// For details of a specific fixer such as 'hostport', see 'go tool fix help hostport'. // -// For more about fix, see 'go doc cmd/fix'. // For more about specifying packages, see 'go help packages'. // -// To run fix with other options, run 'go tool fix'. +// The build flags supported by go fix are those that control package resolution +// and execution, such as -C, -n, -x, -v, -tags, and -toolexec. +// For more about these flags, see 'go help build'. // // See also: go fmt, go vet. // @@ -1942,7 +1953,7 @@ // Also emits build output in JSON. See 'go help buildjson'. // // -o file -// Compile the test binary to the named file. +// Save a copy of the test binary to the named file. // The test still runs (unless -c or -i is specified). // If file ends in a slash or names an existing directory, // the test is written to pkg.test in that directory. 
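[Editorial note: per the new "go fix" help text above, the default fix tool is now a thin driver over the analysis framework, and -fixtool can select an alternative. A minimal sketch of such a tool; the analyzer choice here is illustrative, any analysis.Analyzer values work. Build it and point go fix (or go vet) at the binary, e.g. go fix -fixtool=$(which myfix) ./... :]

	package main

	import (
		"golang.org/x/tools/go/analysis/passes/buildtag"
		"golang.org/x/tools/go/analysis/passes/printf"
		"golang.org/x/tools/go/analysis/unitchecker"
	)

	func main() {
		// unitchecker.Main implements the driver protocol spoken by
		// "go vet -vettool=..." and "go fix -fixtool=..."; it never returns.
		unitchecker.Main(buildtag.Analyzer, printf.Analyzer)
	}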
@@ -2014,20 +2025,36 @@ // // go vet [build flags] [-vettool prog] [vet flags] [packages] // -// Vet runs the Go vet command on the packages named by the import paths. +// Vet runs the Go vet tool (cmd/vet) on the named packages +// and reports diagnostics. // -// For more about vet and its flags, see 'go doc cmd/vet'. -// For more about specifying packages, see 'go help packages'. -// For a list of checkers and their flags, see 'go tool vet help'. -// For details of a specific checker such as 'printf', see 'go tool vet help printf'. +// It supports these flags: // -// The -vettool=prog flag selects a different analysis tool with alternative -// or additional checks. -// For example, the 'shadow' analyzer can be built and run using these commands: +// -c int +// display offending line with this many lines of context (default -1) +// -json +// emit JSON output +// -fix +// instead of printing each diagnostic, apply its first fix (if any) +// -diff +// instead of applying each fix, print the patch as a unified diff +// +// The -vettool=prog flag selects a different analysis tool with +// alternative or additional checks. For example, the 'shadow' analyzer +// can be built and run using these commands: // // go install golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow@latest // go vet -vettool=$(which shadow) // +// Alternative vet tools should be built atop golang.org/x/tools/go/analysis/unitchecker, +// which handles the interaction with go vet. +// +// The default vet tool is 'go tool vet' or cmd/vet. +// For help on its checkers and their flags, run 'go tool vet help'. +// For details of a specific checker such as 'printf', see 'go tool vet help printf'. +// +// For more about specifying packages, see 'go help packages'. +// // The build flags supported by go vet are those that control package resolution // and execution, such as -C, -n, -x, -v, -tags, and -toolexec. // For more about these flags, see 'go help build'. @@ -2163,6 +2190,12 @@ // building the package for Windows; similarly, math_386.s will be included // only when building the package for 32-bit x86. // +// By convention, packages with assembly implementations may provide a go-only +// version under the "purego" build constraint. This does not limit the use of +// cgo (use the "cgo" build constraint) or unsafe. For example: +// +// //go:build purego +// // Go versions 1.16 and earlier used a different syntax for build constraints, // with a "// +build" prefix. The gofmt command will add an equivalent //go:build // constraint when encountering the older syntax. @@ -3244,6 +3277,10 @@ // The following flags are recognized by the 'go test' command and // control the execution of any test: // +// -artifacts +// Save test artifacts in the directory specified by -outputdir. +// See 'go doc testing.T.ArtifactDir'. +// // -bench regexp // Run only those benchmarks matching a regular expression. // By default, no benchmarks are run. @@ -3338,6 +3375,10 @@ // This will only list top-level tests. No subtest or subbenchmarks will be // shown. // +// -outputdir directory +// Place output files from profiling and test artifacts in the +// specified directory, by default the directory in which "go test" is running. +// // -parallel n // Allow parallel execution of test functions that call t.Parallel, and // fuzz targets that call t.Parallel when running the seed corpus. @@ -3449,10 +3490,6 @@ // Sample 1 in n stack traces of goroutines holding a // contended mutex. 
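The new "purego" paragraph above is easiest to see with a concrete file; the package and function names here are invented for illustration:

//go:build purego

// Package sum's portable fallback: selected with -tags=purego in place
// of a hand-written assembly implementation that would live in a sibling
// file guarded by //go:build !purego.
package sum

// Add is the pure-Go implementation (illustrative).
func Add(a, b int) int { return a + b }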
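The -artifacts and -outputdir descriptions above pair with the new testing ArtifactDir API. A hedged sketch of how a test might use it (file contents invented; see 'go doc testing.T.ArtifactDir' for the authoritative behavior):

package demo

import (
	"os"
	"path/filepath"
	"testing"
)

// TestWritesArtifact saves an output file for later inspection. With
// 'go test -artifacts', the directory lands under -outputdir and
// survives the run.
func TestWritesArtifact(t *testing.T) {
	dir := t.ArtifactDir()
	if err := os.WriteFile(filepath.Join(dir, "dump.txt"), []byte("state"), 0o644); err != nil {
		t.Fatal(err)
	}
}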
// -// -outputdir directory -// Place output files from profiling in the specified directory, -// by default the directory in which "go test" is running. -// // -trace trace.out // Write an execution trace to the specified file before exiting. // diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go index e4ee9bd1e8d..1722f1a2c34 100644 --- a/src/cmd/go/go_test.go +++ b/src/cmd/go/go_test.go @@ -1506,15 +1506,17 @@ func main() { tg.setenv("PKG_CONFIG_PATH", tg.path(".")) tg.run("run", tg.path("foo.go")) - if runtime.GOOS != "darwin" { // darwin doesn't like these ldflags - // test for ldflags - tg.tempFile("bar.pc", ` + libs := `Libs: -Wl,-rpath=/path\ with\ spaces/bin` + if runtime.GOOS == "darwin" { + libs = "" // darwin linker doesn't have -rpath + } + // test for ldflags + tg.tempFile("bar.pc", ` Name: bar Description: The bar library Version: 1.0.0 -Libs: -Wl,-rpath=/path\ with\ spaces/bin +`+libs+` `) - } tg.tempFile("bar.go", `package main /* diff --git a/src/cmd/go/internal/base/limit.go b/src/cmd/go/internal/base/limit.go index 4317432527a..a90b700a031 100644 --- a/src/cmd/go/internal/base/limit.go +++ b/src/cmd/go/internal/base/limit.go @@ -63,6 +63,12 @@ func AcquireNet() (release func(), err error) { <-netLimitSem } cleanup.Stop() + + // checker may be dead at the moment after we last access + // it in this function, so the cleanup can fire before Stop + // completes. Keep checker alive while we call Stop. See + // the documentation for runtime.Cleanup.Stop. + runtime.KeepAlive(checker) }, nil } diff --git a/src/cmd/go/internal/base/path.go b/src/cmd/go/internal/base/path.go index 5bb7bc3bde6..a7577f62e76 100644 --- a/src/cmd/go/internal/base/path.go +++ b/src/cmd/go/internal/base/path.go @@ -55,8 +55,7 @@ func sameFile(path1, path2 string) bool { // ShortPathError rewrites the path in err using base.ShortPath, if err is a wrapped PathError. func ShortPathError(err error) error { - var pe *fs.PathError - if errors.As(err, &pe) { + if pe, ok := errors.AsType[*fs.PathError](err); ok { pe.Path = ShortPath(pe.Path) } return err diff --git a/src/cmd/go/internal/bug/bug.go b/src/cmd/go/internal/bug/bug.go index 4ff45d2d888..749edc51cf1 100644 --- a/src/cmd/go/internal/bug/bug.go +++ b/src/cmd/go/internal/bug/bug.go @@ -21,6 +21,7 @@ import ( "cmd/go/internal/base" "cmd/go/internal/cfg" "cmd/go/internal/envcmd" + "cmd/go/internal/modload" "cmd/go/internal/web" "cmd/go/internal/work" ) @@ -41,16 +42,17 @@ func init() { } func runBug(ctx context.Context, cmd *base.Command, args []string) { + moduleLoaderState := modload.NewState() if len(args) > 0 { base.Fatalf("go: bug takes no arguments") } - work.BuildInit() + work.BuildInit(moduleLoaderState) var buf strings.Builder buf.WriteString(bugHeader) printGoVersion(&buf) buf.WriteString("### Does this issue reproduce with the latest release?\n\n\n") - printEnvDetails(&buf) + printEnvDetails(moduleLoaderState, &buf) buf.WriteString(bugFooter) body := buf.String() @@ -91,21 +93,21 @@ func printGoVersion(w io.Writer) { fmt.Fprintf(w, "\n") } -func printEnvDetails(w io.Writer) { +func printEnvDetails(loaderstate *modload.State, w io.Writer) { fmt.Fprintf(w, "### What operating system and processor architecture are you using (`go env`)?\n\n") fmt.Fprintf(w, "
<details><summary><code>go env</code> Output</summary><br><pre>\n")
 	fmt.Fprintf(w, "$ go env\n")
-	printGoEnv(w)
+	printGoEnv(loaderstate, w)
 	printGoDetails(w)
 	printOSDetails(w)
 	printCDetails(w)
 	fmt.Fprintf(w, "</pre></details>
    \n\n") } -func printGoEnv(w io.Writer) { +func printGoEnv(loaderstate *modload.State, w io.Writer) { env := envcmd.MkEnv() - env = append(env, envcmd.ExtraEnvVars()...) - env = append(env, envcmd.ExtraEnvVarsCostly()...) + env = append(env, envcmd.ExtraEnvVars(loaderstate)...) + env = append(env, envcmd.ExtraEnvVarsCostly(loaderstate)...) envcmd.PrintEnv(w, env, false) } diff --git a/src/cmd/go/internal/clean/clean.go b/src/cmd/go/internal/clean/clean.go index c6f311e0263..51581c27e16 100644 --- a/src/cmd/go/internal/clean/clean.go +++ b/src/cmd/go/internal/clean/clean.go @@ -120,7 +120,8 @@ func init() { } func runClean(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() + moduleLoaderState := modload.NewState() + moduleLoaderState.InitWorkfile() if len(args) > 0 { cacheFlag := "" switch { @@ -142,13 +143,13 @@ func runClean(ctx context.Context, cmd *base.Command, args []string) { // either the flags and arguments explicitly imply a package, // or no other target (such as a cache) was requested to be cleaned. cleanPkg := len(args) > 0 || cleanI || cleanR - if (!modload.Enabled() || modload.HasModRoot()) && + if (!moduleLoaderState.Enabled() || moduleLoaderState.HasModRoot()) && !cleanCache && !cleanModcache && !cleanTestcache && !cleanFuzzcache { cleanPkg = true } if cleanPkg { - for _, pkg := range load.PackagesAndErrors(ctx, load.PackageOpts{}, args) { + for _, pkg := range load.PackagesAndErrors(moduleLoaderState, ctx, load.PackageOpts{}, args) { clean(pkg) } } diff --git a/src/cmd/go/internal/doc/pkgsite.go b/src/cmd/go/internal/doc/pkgsite.go index 06289ac4fc9..c173167b632 100644 --- a/src/cmd/go/internal/doc/pkgsite.go +++ b/src/cmd/go/internal/doc/pkgsite.go @@ -81,8 +81,7 @@ func doPkgsite(urlPath, fragment string) error { cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { - var ee *exec.ExitError - if errors.As(err, &ee) { + if ee, ok := errors.AsType[*exec.ExitError](err); ok { // Exit with the same exit status as pkgsite to avoid // printing of "exit status" error messages. // Any relevant messages have already been printed diff --git a/src/cmd/go/internal/envcmd/env.go b/src/cmd/go/internal/envcmd/env.go index 6ad6954dd52..d345a368632 100644 --- a/src/cmd/go/internal/envcmd/env.go +++ b/src/cmd/go/internal/envcmd/env.go @@ -189,16 +189,16 @@ func findEnv(env []cfg.EnvVar, name string) string { } // ExtraEnvVars returns environment variables that should not leak into child processes. -func ExtraEnvVars() []cfg.EnvVar { +func ExtraEnvVars(loaderstate *modload.State) []cfg.EnvVar { gomod := "" - modload.Init() - if modload.HasModRoot() { - gomod = modload.ModFilePath() - } else if modload.Enabled() { + modload.Init(loaderstate) + if loaderstate.HasModRoot() { + gomod = loaderstate.ModFilePath() + } else if loaderstate.Enabled() { gomod = os.DevNull } - modload.InitWorkfile() - gowork := modload.WorkFilePath() + loaderstate.InitWorkfile() + gowork := modload.WorkFilePath(loaderstate) // As a special case, if a user set off explicitly, report that in GOWORK. if cfg.Getenv("GOWORK") == "off" { gowork = "off" @@ -211,8 +211,8 @@ func ExtraEnvVars() []cfg.EnvVar { // ExtraEnvVarsCostly returns environment variables that should not leak into child processes // but are costly to evaluate. 
-func ExtraEnvVarsCostly() []cfg.EnvVar { - b := work.NewBuilder("") +func ExtraEnvVarsCostly(loaderstate *modload.State) []cfg.EnvVar { + b := work.NewBuilder("", loaderstate.VendorDirOrEmpty) defer func() { if err := b.Close(); err != nil { base.Fatal(err) @@ -272,6 +272,7 @@ func argKey(arg string) string { } func runEnv(ctx context.Context, cmd *base.Command, args []string) { + moduleLoaderState := modload.NewState() if *envJson && *envU { base.Fatalf("go: cannot use -json with -u") } @@ -306,7 +307,7 @@ func runEnv(ctx context.Context, cmd *base.Command, args []string) { } env := cfg.CmdEnv - env = append(env, ExtraEnvVars()...) + env = append(env, ExtraEnvVars(moduleLoaderState)...) if err := fsys.Init(); err != nil { base.Fatal(err) @@ -336,8 +337,8 @@ func runEnv(ctx context.Context, cmd *base.Command, args []string) { } } if needCostly { - work.BuildInit() - env = append(env, ExtraEnvVarsCostly()...) + work.BuildInit(moduleLoaderState) + env = append(env, ExtraEnvVarsCostly(moduleLoaderState)...) } if len(args) > 0 { diff --git a/src/cmd/go/internal/fix/fix.go b/src/cmd/go/internal/fix/fix.go deleted file mode 100644 index 8947da05c3e..00000000000 --- a/src/cmd/go/internal/fix/fix.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package fix implements the “go fix” command. -package fix - -import ( - "cmd/go/internal/base" - "cmd/go/internal/cfg" - "cmd/go/internal/load" - "cmd/go/internal/modload" - "cmd/go/internal/str" - "cmd/go/internal/work" - "context" - "fmt" - "go/build" - "os" - "path/filepath" -) - -var CmdFix = &base.Command{ - UsageLine: "go fix [-fix list] [packages]", - Short: "update packages to use new APIs", - Long: ` -Fix runs the Go fix command on the packages named by the import paths. - -The -fix flag sets a comma-separated list of fixes to run. -The default is all known fixes. -(Its value is passed to 'go tool fix -r'.) - -For more about fix, see 'go doc cmd/fix'. -For more about specifying packages, see 'go help packages'. - -To run fix with other options, run 'go tool fix'. - -See also: go fmt, go vet. - `, -} - -var fixes = CmdFix.Flag.String("fix", "", "comma-separated list of fixes to apply") - -func init() { - work.AddBuildFlags(CmdFix, work.OmitBuildOnlyFlags) - CmdFix.Run = runFix // fix cycle -} - -func runFix(ctx context.Context, cmd *base.Command, args []string) { - pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{}, args) - w := 0 - for _, pkg := range pkgs { - if pkg.Error != nil { - base.Errorf("%v", pkg.Error) - continue - } - pkgs[w] = pkg - w++ - } - pkgs = pkgs[:w] - - printed := false - for _, pkg := range pkgs { - if modload.Enabled() && pkg.Module != nil && !pkg.Module.Main { - if !printed { - fmt.Fprintf(os.Stderr, "go: not fixing packages in dependency modules\n") - printed = true - } - continue - } - // Use pkg.gofiles instead of pkg.Dir so that - // the command only applies to this package, - // not to packages in subdirectories. 
- files := base.RelPaths(pkg.InternalAllGoFiles()) - goVersion := "" - if pkg.Module != nil { - goVersion = "go" + pkg.Module.GoVersion - } else if pkg.Standard { - goVersion = build.Default.ReleaseTags[len(build.Default.ReleaseTags)-1] - } - var fixArg []string - if *fixes != "" { - fixArg = []string{"-r=" + *fixes} - } - base.Run(str.StringList(cfg.BuildToolexec, filepath.Join(cfg.GOROOTbin, "go"), "tool", "fix", "-go="+goVersion, fixArg, files)) - } -} diff --git a/src/cmd/go/internal/fmtcmd/fmt.go b/src/cmd/go/internal/fmtcmd/fmt.go index 62b22f6bcfa..fe356bdc081 100644 --- a/src/cmd/go/internal/fmtcmd/fmt.go +++ b/src/cmd/go/internal/fmtcmd/fmt.go @@ -50,6 +50,7 @@ See also: go fix, go vet. } func runFmt(ctx context.Context, cmd *base.Command, args []string) { + moduleLoaderState := modload.NewState() printed := false gofmt := gofmtPath() @@ -59,8 +60,8 @@ func runFmt(ctx context.Context, cmd *base.Command, args []string) { baseGofmtArgs := len(gofmtArgs) baseGofmtArgLen := gofmtArgLen - for _, pkg := range load.PackagesAndErrors(ctx, load.PackageOpts{}, args) { - if modload.Enabled() && pkg.Module != nil && !pkg.Module.Main { + for _, pkg := range load.PackagesAndErrors(moduleLoaderState, ctx, load.PackageOpts{}, args) { + if moduleLoaderState.Enabled() && pkg.Module != nil && !pkg.Module.Main { if !printed { fmt.Fprintf(os.Stderr, "go: not formatting packages in dependency modules\n") printed = true @@ -68,11 +69,10 @@ func runFmt(ctx context.Context, cmd *base.Command, args []string) { continue } if pkg.Error != nil { - var nogo *load.NoGoError - var embed *load.EmbedError - if (errors.As(pkg.Error, &nogo) || errors.As(pkg.Error, &embed)) && len(pkg.InternalAllGoFiles()) > 0 { - // Skip this error, as we will format - // all files regardless. + if _, ok := errors.AsType[*load.NoGoError](pkg.Error); ok { + // Skip this error, as we will format all files regardless. + } else if _, ok := errors.AsType[*load.EmbedError](pkg.Error); ok && len(pkg.InternalAllGoFiles()) > 0 { + // Skip this error, as we will format all files regardless. } else { base.Errorf("%v", pkg.Error) continue diff --git a/src/cmd/go/internal/generate/generate.go b/src/cmd/go/internal/generate/generate.go index 0f4b4a972e9..59142859c1f 100644 --- a/src/cmd/go/internal/generate/generate.go +++ b/src/cmd/go/internal/generate/generate.go @@ -182,7 +182,8 @@ func init() { } func runGenerate(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() + moduleLoaderState := modload.NewState() + moduleLoaderState.InitWorkfile() if generateRunFlag != "" { var err error @@ -204,8 +205,8 @@ func runGenerate(ctx context.Context, cmd *base.Command, args []string) { // Even if the arguments are .go files, this loop suffices. 
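Several files in this change (base/path.go, doc/pkgsite.go, fmtcmd/fmt.go, load/pkg.go) replace errors.As with the generic errors.AsType. A standalone sketch of the pattern, assuming the (value, ok) shape used throughout this diff:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("no-such-file")

	// Before: a target pointer plus errors.As.
	var pe *fs.PathError
	if errors.As(err, &pe) {
		fmt.Println("as:", pe.Path)
	}

	// After: the type is stated once and no out-parameter is needed.
	if pe, ok := errors.AsType[*fs.PathError](err); ok {
		fmt.Println("astype:", pe.Path)
	}
}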
printed := false pkgOpts := load.PackageOpts{IgnoreImports: true} - for _, pkg := range load.PackagesAndErrors(ctx, pkgOpts, args) { - if modload.Enabled() && pkg.Module != nil && !pkg.Module.Main { + for _, pkg := range load.PackagesAndErrors(moduleLoaderState, ctx, pkgOpts, args) { + if moduleLoaderState.Enabled() && pkg.Module != nil && !pkg.Module.Main { if !printed { fmt.Fprintf(os.Stderr, "go: not generating in packages in dependency modules\n") printed = true diff --git a/src/cmd/go/internal/help/helpdoc.go b/src/cmd/go/internal/help/helpdoc.go index ab04ce001c4..1d3ffefc975 100644 --- a/src/cmd/go/internal/help/helpdoc.go +++ b/src/cmd/go/internal/help/helpdoc.go @@ -1009,6 +1009,12 @@ Naming a file dns_windows.go will cause it to be included only when building the package for Windows; similarly, math_386.s will be included only when building the package for 32-bit x86. +By convention, packages with assembly implementations may provide a go-only +version under the "purego" build constraint. This does not limit the use of +cgo (use the "cgo" build constraint) or unsafe. For example: + + //go:build purego + Go versions 1.16 and earlier used a different syntax for build constraints, with a "// +build" prefix. The gofmt command will add an equivalent //go:build constraint when encountering the older syntax. diff --git a/src/cmd/go/internal/list/list.go b/src/cmd/go/internal/list/list.go index bee7dc8053e..81ac4ebaf9c 100644 --- a/src/cmd/go/internal/list/list.go +++ b/src/cmd/go/internal/list/list.go @@ -419,7 +419,8 @@ func (v *jsonFlag) needAny(fields ...string) bool { var nl = []byte{'\n'} func runList(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() + moduleLoaderState := modload.NewState() + moduleLoaderState.InitWorkfile() if *listFmt != "" && listJson { base.Fatalf("go list -f cannot be used with -json") @@ -427,11 +428,11 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { if *listReuse != "" && !*listM { base.Fatalf("go list -reuse cannot be used without -m") } - if *listReuse != "" && modload.HasModRoot() { + if *listReuse != "" && moduleLoaderState.HasModRoot() { base.Fatalf("go list -reuse cannot be used inside a module") } - work.BuildInit() + work.BuildInit(moduleLoaderState) out := newTrackingWriter(os.Stdout) defer out.w.Flush() @@ -479,7 +480,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { fm := template.FuncMap{ "join": strings.Join, "context": context, - "module": func(path string) *modinfo.ModulePublic { return modload.ModuleInfo(ctx, path) }, + "module": func(path string) *modinfo.ModulePublic { return modload.ModuleInfo(moduleLoaderState, ctx, path) }, } tmpl, err := template.New("main").Funcs(fm).Parse(*listFmt) if err != nil { @@ -496,12 +497,12 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { } } - modload.Init() + modload.Init(moduleLoaderState) if *listRetracted { if cfg.BuildMod == "vendor" { base.Fatalf("go list -retracted cannot be used when vendoring is enabled") } - if !modload.Enabled() { + if !moduleLoaderState.Enabled() { base.Fatalf("go list -retracted can only be used in module-aware mode") } } @@ -525,11 +526,11 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { base.Fatalf("go list -test cannot be used with -m") } - if modload.Init(); !modload.Enabled() { + if modload.Init(moduleLoaderState); !moduleLoaderState.Enabled() { base.Fatalf("go: list -m cannot be used with GO111MODULE=off") } - modload.LoadModFile(ctx) // Sets 
cfg.BuildMod as a side-effect. + modload.LoadModFile(moduleLoaderState, ctx) // Sets cfg.BuildMod as a side-effect. if cfg.BuildMod == "vendor" { const actionDisabledFormat = "go: can't %s using the vendor directory\n\t(Use -mod=mod or -mod=readonly to bypass.)" @@ -569,7 +570,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { if *listReuse != "" && len(args) == 0 { base.Fatalf("go: list -m -reuse only has an effect with module@version arguments") } - mods, err := modload.ListModules(ctx, args, mode, *listReuse) + mods, err := modload.ListModules(moduleLoaderState, ctx, args, mode, *listReuse) if !*listE { for _, m := range mods { if m.Error != nil { @@ -613,7 +614,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { SuppressBuildInfo: !*listExport && !listJsonFields.needAny("Stale", "StaleReason"), SuppressEmbedFiles: !*listExport && !listJsonFields.needAny("EmbedFiles", "TestEmbedFiles", "XTestEmbedFiles"), } - pkgs := load.PackagesAndErrors(ctx, pkgOpts, args) + pkgs := load.PackagesAndErrors(moduleLoaderState, ctx, pkgOpts, args) if !*listE { w := 0 for _, pkg := range pkgs { @@ -648,10 +649,10 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { sema.Release(1) wg.Done() } - pmain, ptest, pxtest = load.TestPackagesAndErrors(ctx, done, pkgOpts, p, nil) + pmain, ptest, pxtest = load.TestPackagesAndErrors(moduleLoaderState, ctx, done, pkgOpts, p, nil) } else { var perr *load.Package - pmain, ptest, pxtest, perr = load.TestPackagesFor(ctx, pkgOpts, p, nil) + pmain, ptest, pxtest, perr = load.TestPackagesFor(moduleLoaderState, ctx, pkgOpts, p, nil) if perr != nil { base.Fatalf("go: can't load test package: %s", perr.Error) } @@ -713,7 +714,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { // Do we need to run a build to gather information? needStale := (listJson && listJsonFields.needAny("Stale", "StaleReason")) || strings.Contains(*listFmt, ".Stale") if needStale || *listExport || *listCompiled { - b := work.NewBuilder("") + b := work.NewBuilder("", moduleLoaderState.VendorDirOrEmpty) if *listE { b.AllowErrors = true } @@ -727,13 +728,13 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { b.NeedExport = *listExport b.NeedCompiledGoFiles = *listCompiled if cfg.BuildCover { - load.PrepareForCoverageBuild(pkgs) + load.PrepareForCoverageBuild(moduleLoaderState, pkgs) } a := &work.Action{} // TODO: Use pkgsFilter? 
for _, p := range pkgs { if len(p.GoFiles)+len(p.CgoFiles) > 0 { - a.Deps = append(a.Deps, b.AutoAction(work.ModeInstall, work.ModeInstall, p)) + a.Deps = append(a.Deps, b.AutoAction(moduleLoaderState, work.ModeInstall, work.ModeInstall, p)) } } b.Do(ctx, a) @@ -741,8 +742,8 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { for _, p := range pkgs { // Show vendor-expanded paths in listing - p.TestImports = p.Resolve(p.TestImports) - p.XTestImports = p.Resolve(p.XTestImports) + p.TestImports = p.Resolve(moduleLoaderState, p.TestImports) + p.XTestImports = p.Resolve(moduleLoaderState, p.XTestImports) p.DepOnly = !cmdline[p] if *listCompiled { @@ -850,7 +851,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { if *listRetracted { mode |= modload.ListRetracted } - rmods, err := modload.ListModules(ctx, args, mode, *listReuse) + rmods, err := modload.ListModules(moduleLoaderState, ctx, args, mode, *listReuse) if err != nil && !*listE { base.Error(err) } diff --git a/src/cmd/go/internal/load/flag.go b/src/cmd/go/internal/load/flag.go index 55bdab01350..a9188db0fd2 100644 --- a/src/cmd/go/internal/load/flag.go +++ b/src/cmd/go/internal/load/flag.go @@ -6,6 +6,7 @@ package load import ( "cmd/go/internal/base" + "cmd/go/internal/modload" "cmd/internal/quoted" "fmt" "strings" @@ -29,7 +30,7 @@ type PerPackageFlag struct { // A ppfValue is a single = per-package flag value. type ppfValue struct { - match func(*Package) bool // compiled pattern + match func(*modload.State, *Package) bool // compiled pattern flags []string } @@ -42,7 +43,7 @@ func (f *PerPackageFlag) Set(v string) error { func (f *PerPackageFlag) set(v, cwd string) error { f.raw = v f.present = true - match := func(p *Package) bool { return p.Internal.CmdlinePkg || p.Internal.CmdlineFiles } // default predicate with no pattern + match := func(_ *modload.State, p *Package) bool { return p.Internal.CmdlinePkg || p.Internal.CmdlineFiles } // default predicate with no pattern // For backwards compatibility with earlier flag splitting, ignore spaces around flags. v = strings.TrimSpace(v) if v == "" { @@ -85,10 +86,13 @@ func (f *PerPackageFlag) Present() bool { } // For returns the flags to use for the given package. -func (f *PerPackageFlag) For(p *Package) []string { +// +// The module loader state is used by the matcher to know if certain +// patterns match packages within the state's MainModules. 
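Most of this diff mechanically threads a *modload.State through call chains that previously consulted package-level module-loader globals. A schematic of the before/after shape, with all names invented:

package demo

// State carries what used to be package-level module-loader variables.
type State struct {
	modRoot string
}

func NewState() *State { return &State{} }

func (s *State) HasModRoot() bool { return s.modRoot != "" }

// Before: a global func Enabled() bool read package state; after, each
// command constructs one State and passes it down explicitly.
func run() {
	st := NewState()
	if st.HasModRoot() {
		// module-aware path
	}
}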
+func (f *PerPackageFlag) For(s *modload.State, p *Package) []string { flags := []string{} for _, v := range f.values { - if v.match(p) { + if v.match(s, p) { flags = v.flags } } diff --git a/src/cmd/go/internal/load/flag_test.go b/src/cmd/go/internal/load/flag_test.go index d3223e12d52..0c2363cb790 100644 --- a/src/cmd/go/internal/load/flag_test.go +++ b/src/cmd/go/internal/load/flag_test.go @@ -5,6 +5,7 @@ package load import ( + "cmd/go/internal/modload" "fmt" "path/filepath" "reflect" @@ -125,7 +126,7 @@ func TestPerPackageFlag(t *testing.T) { } for _, p := range tt.pkgs { dir := nativeDir(p.dir) - flags := ppFlags.For(&Package{PackagePublic: PackagePublic{ImportPath: p.path, Dir: dir}, Internal: PackageInternal{CmdlinePkg: p.cmdline}}) + flags := ppFlags.For(modload.NewState(), &Package{PackagePublic: PackagePublic{ImportPath: p.path, Dir: dir}, Internal: PackageInternal{CmdlinePkg: p.cmdline}}) if !reflect.DeepEqual(flags, p.flags) { t.Errorf("For(%v, %v, %v) = %v, want %v", p.path, dir, p.cmdline, flags, p.flags) } diff --git a/src/cmd/go/internal/load/godebug.go b/src/cmd/go/internal/load/godebug.go index 8ea8ffab1ae..817cc4faebf 100644 --- a/src/cmd/go/internal/load/godebug.go +++ b/src/cmd/go/internal/load/godebug.go @@ -45,12 +45,12 @@ func ParseGoDebug(text string) (key, value string, err error) { // defaultGODEBUG returns the default GODEBUG setting for the main package p. // When building a test binary, directives, testDirectives, and xtestDirectives // list additional directives from the package under test. -func defaultGODEBUG(p *Package, directives, testDirectives, xtestDirectives []build.Directive) string { +func defaultGODEBUG(loaderstate *modload.State, p *Package, directives, testDirectives, xtestDirectives []build.Directive) string { if p.Name != "main" { return "" } - goVersion := modload.MainModules.GoVersion() - if modload.RootMode == modload.NoRoot && p.Module != nil { + goVersion := loaderstate.MainModules.GoVersion(loaderstate) + if loaderstate.RootMode == modload.NoRoot && p.Module != nil { // This is go install pkg@version or go run pkg@version. // Use the Go version from the package. // If there isn't one, then assume Go 1.20, @@ -73,7 +73,7 @@ func defaultGODEBUG(p *Package, directives, testDirectives, xtestDirectives []bu } // Add directives from main module go.mod. - for _, g := range modload.MainModules.Godebugs() { + for _, g := range loaderstate.MainModules.Godebugs(loaderstate) { if m == nil { m = make(map[string]string) } diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go index 1f791546f90..3b8bbdc91b8 100644 --- a/src/cmd/go/internal/load/pkg.go +++ b/src/cmd/go/internal/load/pkg.go @@ -290,8 +290,8 @@ func (p *Package) setLoadPackageDataError(err error, path string, stk *ImportSta // Replace (possibly wrapped) *build.NoGoError with *load.NoGoError. // The latter is more specific about the cause. - var nogoErr *build.NoGoError - if errors.As(err, &nogoErr) { + nogoErr, ok := errors.AsType[*build.NoGoError](err) + if ok { if p.Dir == "" && nogoErr.Dir != "" { p.Dir = nogoErr.Dir } @@ -355,14 +355,14 @@ func (p *Package) setLoadPackageDataError(err error, path string, stk *ImportSta // can produce better error messages if it starts with the original paths. // The initial load of p loads all the non-test imports and rewrites // the vendored paths, so nothing should ever call p.vendored(p.Imports). 
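defaultGODEBUG, now parameterized by the loader state, merges the main module's go.mod "godebug" lines with //go:debug directives from package main sources. For orientation, a directive of the kind it consumes (the setting and value are illustrative):

// A //go:debug line that defaultGODEBUG folds into the binary's default
// GODEBUG. Such directives are only permitted in package main files.

//go:debug http2client=0

package main

func main() {}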
-func (p *Package) Resolve(imports []string) []string { +func (p *Package) Resolve(s *modload.State, imports []string) []string { if len(imports) > 0 && len(p.Imports) > 0 && &imports[0] == &p.Imports[0] { panic("internal error: p.Resolve(p.Imports) called") } seen := make(map[string]bool) var all []string for _, path := range imports { - path = ResolveImportPath(p, path) + path = ResolveImportPath(s, p, path) if !seen[path] { seen[path] = true all = append(all, path) @@ -686,8 +686,8 @@ const ( ) // LoadPackage does Load import, but without a parent package load context -func LoadPackage(ctx context.Context, opts PackageOpts, path, srcDir string, stk *ImportStack, importPos []token.Position, mode int) *Package { - p, err := loadImport(ctx, opts, nil, path, srcDir, nil, stk, importPos, mode) +func LoadPackage(loaderstate *modload.State, ctx context.Context, opts PackageOpts, path, srcDir string, stk *ImportStack, importPos []token.Position, mode int) *Package { + p, err := loadImport(loaderstate, ctx, opts, nil, path, srcDir, nil, stk, importPos, mode) if err != nil { base.Fatalf("internal error: loadImport of %q with nil parent returned an error", path) } @@ -703,7 +703,7 @@ func LoadPackage(ctx context.Context, opts PackageOpts, path, srcDir string, stk // The returned PackageError, if any, describes why parent is not allowed // to import the named package, with the error referring to importPos. // The PackageError can only be non-nil when parent is not nil. -func loadImport(ctx context.Context, opts PackageOpts, pre *preload, path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) (*Package, *PackageError) { +func loadImport(loaderstate *modload.State, ctx context.Context, opts PackageOpts, pre *preload, path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) (*Package, *PackageError) { ctx, span := trace.StartSpan(ctx, "modload.loadImport "+path) defer span.Done() @@ -718,9 +718,9 @@ func loadImport(ctx context.Context, opts PackageOpts, pre *preload, path, srcDi parentRoot = parent.Root parentIsStd = parent.Standard } - bp, loaded, err := loadPackageData(ctx, path, parentPath, srcDir, parentRoot, parentIsStd, mode) + bp, loaded, err := loadPackageData(loaderstate, ctx, path, parentPath, srcDir, parentRoot, parentIsStd, mode) if loaded && pre != nil && !opts.IgnoreImports { - pre.preloadImports(ctx, opts, bp.Imports, bp) + pre.preloadImports(loaderstate, ctx, opts, bp.Imports, bp) } if bp == nil { p := &Package{ @@ -771,7 +771,7 @@ func loadImport(ctx context.Context, opts PackageOpts, pre *preload, path, srcDi // Load package. // loadPackageData may return bp != nil even if an error occurs, // in order to return partial information. - p.load(ctx, opts, path, stk, importPos, bp, err) + p.load(loaderstate, ctx, opts, path, stk, importPos, bp, err) if !cfg.ModulesEnabled && path != cleanImport(path) { p.Error = &PackageError{ @@ -784,7 +784,7 @@ func loadImport(ctx context.Context, opts PackageOpts, pre *preload, path, srcDi } // Checked on every import because the rules depend on the code doing the importing. 
- if perr := disallowInternal(ctx, srcDir, parent, parentPath, p, stk); perr != nil { + if perr := disallowInternal(loaderstate, ctx, srcDir, parent, parentPath, p, stk); perr != nil { perr.setPos(importPos) return p, perr } @@ -838,7 +838,7 @@ func extractFirstImport(importPos []token.Position) *token.Position { // // loadPackageData returns a boolean, loaded, which is true if this is the // first time the package was loaded. Callers may preload imports in this case. -func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoot string, parentIsStd bool, mode int) (bp *build.Package, loaded bool, err error) { +func loadPackageData(loaderstate *modload.State, ctx context.Context, path, parentPath, parentDir, parentRoot string, parentIsStd bool, mode int) (bp *build.Package, loaded bool, err error) { ctx, span := trace.StartSpan(ctx, "load.loadPackageData "+path) defer span.Done() @@ -883,7 +883,7 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo r.path = newPath r.dir = dir } else if cfg.ModulesEnabled { - r.dir, r.path, r.err = modload.Lookup(parentPath, parentIsStd, path) + r.dir, r.path, r.err = modload.Lookup(loaderstate, parentPath, parentIsStd, path) } else if build.IsLocalImport(path) { r.dir = filepath.Join(parentDir, path) r.path = dirToImportPath(r.dir) @@ -892,7 +892,7 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo // find out the key to use in packageCache without the // overhead of repeated calls to buildContext.Import. // The code is also needed in a few other places anyway. - r.path = resolveImportPath(path, parentPath, parentDir, parentRoot, parentIsStd) + r.path = resolveImportPath(loaderstate, path, parentPath, parentDir, parentRoot, parentIsStd) } else if mode&ResolveModule != 0 { r.path = moduleImportPath(path, parentPath, parentDir, parentRoot) } @@ -921,7 +921,7 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo } else { buildContext.GOPATH = "" // Clear GOPATH so packages are imported as pure module packages } - modroot := modload.PackageModRoot(ctx, r.path) + modroot := modload.PackageModRoot(loaderstate, ctx, r.path) if modroot == "" && str.HasPathPrefix(r.dir, cfg.GOROOTsrc) { modroot = cfg.GOROOTsrc gorootSrcCmd := filepath.Join(cfg.GOROOTsrc, "cmd") @@ -942,7 +942,7 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo if cfg.ModulesEnabled { // Override data.p.Root, since ImportDir sets it to $GOPATH, if // the module is inside $GOPATH/src. - if info := modload.PackageModuleInfo(ctx, path); info != nil { + if info := modload.PackageModuleInfo(loaderstate, ctx, path); info != nil { data.p.Root = info.Dir } } @@ -989,7 +989,7 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo if cfg.GOBIN != "" { data.p.BinDir = cfg.GOBIN } else if cfg.ModulesEnabled { - data.p.BinDir = modload.BinDir() + data.p.BinDir = modload.BinDir(loaderstate) } } @@ -1068,7 +1068,7 @@ func newPreload() *preload { // preloadMatches loads data for package paths matched by patterns. // When preloadMatches returns, some packages may not be loaded yet, but // loadPackageData and loadImport are always safe to call. 
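disallowInternal, shown above gaining the loader state (which it now uses to derive the importer's path via MainModules.DirImportPath), enforces the usual visibility rule: a path containing an "internal" element is importable only from the tree rooted at the parent of "internal". A deliberately simplified standalone check (it ignores vendoring, GOROOT, and "internal" as the first or last element):

package main

import (
	"fmt"
	"strings"
)

func allowedImport(importerPath, importedPath string) bool {
	i := strings.LastIndex(importedPath, "/internal/")
	if i < 0 {
		return true // no internal element in the middle of the path
	}
	parent := importedPath[:i]
	return importerPath == parent || strings.HasPrefix(importerPath, parent+"/")
}

func main() {
	fmt.Println(allowedImport("a/b/c", "a/b/internal/x")) // true
	fmt.Println(allowedImport("a/z", "a/b/internal/x"))   // false
}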
-func (pre *preload) preloadMatches(ctx context.Context, opts PackageOpts, matches []*search.Match) { +func (pre *preload) preloadMatches(loaderstate *modload.State, ctx context.Context, opts PackageOpts, matches []*search.Match) { for _, m := range matches { for _, pkg := range m.Pkgs { select { @@ -1077,10 +1077,10 @@ func (pre *preload) preloadMatches(ctx context.Context, opts PackageOpts, matche case pre.sema <- struct{}{}: go func(pkg string) { mode := 0 // don't use vendoring or module import resolution - bp, loaded, err := loadPackageData(ctx, pkg, "", base.Cwd(), "", false, mode) + bp, loaded, err := loadPackageData(loaderstate, ctx, pkg, "", base.Cwd(), "", false, mode) <-pre.sema if bp != nil && loaded && err == nil && !opts.IgnoreImports { - pre.preloadImports(ctx, opts, bp.Imports, bp) + pre.preloadImports(loaderstate, ctx, opts, bp.Imports, bp) } }(pkg) } @@ -1091,7 +1091,7 @@ func (pre *preload) preloadMatches(ctx context.Context, opts PackageOpts, matche // preloadImports queues a list of imports for preloading. // When preloadImports returns, some packages may not be loaded yet, // but loadPackageData and loadImport are always safe to call. -func (pre *preload) preloadImports(ctx context.Context, opts PackageOpts, imports []string, parent *build.Package) { +func (pre *preload) preloadImports(loaderstate *modload.State, ctx context.Context, opts PackageOpts, imports []string, parent *build.Package) { parentIsStd := parent.Goroot && parent.ImportPath != "" && search.IsStandardImportPath(parent.ImportPath) for _, path := range imports { if path == "C" || path == "unsafe" { @@ -1102,10 +1102,10 @@ func (pre *preload) preloadImports(ctx context.Context, opts PackageOpts, import return case pre.sema <- struct{}{}: go func(path string) { - bp, loaded, err := loadPackageData(ctx, path, parent.ImportPath, parent.Dir, parent.Root, parentIsStd, ResolveImport) + bp, loaded, err := loadPackageData(loaderstate, ctx, path, parent.ImportPath, parent.Dir, parent.Root, parentIsStd, ResolveImport) <-pre.sema if bp != nil && loaded && err == nil && !opts.IgnoreImports { - pre.preloadImports(ctx, opts, bp.Imports, bp) + pre.preloadImports(loaderstate, ctx, opts, bp.Imports, bp) } }(path) } @@ -1151,7 +1151,7 @@ func isDir(path string) bool { // First, there is Go 1.5 vendoring (golang.org/s/go15vendor). // If vendor expansion doesn't trigger, then the path is also subject to // Go 1.11 module legacy conversion (golang.org/issue/25069). 
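preloadMatches and preloadImports above warm the package cache concurrently, bounded by a semaphore channel, and every path stays safe to load synchronously afterwards. The skeleton of that pattern, reduced to its essentials (names invented):

package demo

// preloadAll warms a cache concurrently; a buffered channel used as a
// counting semaphore bounds in-flight loads, and the final drain waits
// for all of them before synchronous lookups proceed.
func preloadAll(paths []string, load func(string)) {
	sema := make(chan struct{}, 16)
	for _, p := range paths {
		sema <- struct{}{} // acquire a slot
		go func(p string) {
			defer func() { <-sema }() // release
			load(p)
		}(p)
	}
	for i := 0; i < cap(sema); i++ { // drain: wait for stragglers
		sema <- struct{}{}
	}
}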
-func ResolveImportPath(parent *Package, path string) (found string) { +func ResolveImportPath(s *modload.State, parent *Package, path string) (found string) { var parentPath, parentDir, parentRoot string parentIsStd := false if parent != nil { @@ -1160,12 +1160,12 @@ func ResolveImportPath(parent *Package, path string) (found string) { parentRoot = parent.Root parentIsStd = parent.Standard } - return resolveImportPath(path, parentPath, parentDir, parentRoot, parentIsStd) + return resolveImportPath(s, path, parentPath, parentDir, parentRoot, parentIsStd) } -func resolveImportPath(path, parentPath, parentDir, parentRoot string, parentIsStd bool) (found string) { +func resolveImportPath(s *modload.State, path, parentPath, parentDir, parentRoot string, parentIsStd bool) (found string) { if cfg.ModulesEnabled { - if _, p, e := modload.Lookup(parentPath, parentIsStd, path); e == nil { + if _, p, e := modload.Lookup(s, parentPath, parentIsStd, path); e == nil { return p } return path @@ -1463,7 +1463,7 @@ func reusePackage(p *Package, stk *ImportStack) *Package { // is allowed to import p. // If the import is allowed, disallowInternal returns the original package p. // If not, it returns a new package containing just an appropriate error. -func disallowInternal(ctx context.Context, srcDir string, importer *Package, importerPath string, p *Package, stk *ImportStack) *PackageError { +func disallowInternal(loaderstate *modload.State, ctx context.Context, srcDir string, importer *Package, importerPath string, p *Package, stk *ImportStack) *PackageError { // golang.org/s/go14internal: // An import of a path containing the element “internal” // is disallowed if the importing code is outside the tree @@ -1552,7 +1552,7 @@ func disallowInternal(ctx context.Context, srcDir string, importer *Package, imp // directory containing them. // If the directory is outside the main modules, this will resolve to ".", // which is not a prefix of any valid module. - importerPath, _ = modload.MainModules.DirImportPath(ctx, importer.Dir) + importerPath, _ = loaderstate.MainModules.DirImportPath(loaderstate, ctx, importer.Dir) } parentOfInternal := p.ImportPath[:i] if str.HasPathPrefix(importerPath, parentOfInternal) { @@ -1771,7 +1771,7 @@ func (p *Package) DefaultExecName() string { // load populates p using information from bp, err, which should // be the result of calling build.Context.Import. // stk contains the import stack, not including path itself. -func (p *Package) load(ctx context.Context, opts PackageOpts, path string, stk *ImportStack, importPos []token.Position, bp *build.Package, err error) { +func (p *Package) load(loaderstate *modload.State, ctx context.Context, opts PackageOpts, path string, stk *ImportStack, importPos []token.Position, bp *build.Package, err error) { p.copyBuild(opts, bp) // The localPrefix is the path we interpret ./ imports relative to, @@ -1835,7 +1835,7 @@ func (p *Package) load(ctx context.Context, opts PackageOpts, path string, stk * elem = full } if p.Internal.Build.BinDir == "" && cfg.ModulesEnabled { - p.Internal.Build.BinDir = modload.BinDir() + p.Internal.Build.BinDir = modload.BinDir(loaderstate) } if p.Internal.Build.BinDir != "" { // Install to GOBIN or bin of GOPATH entry. @@ -1935,7 +1935,7 @@ func (p *Package) load(ctx context.Context, opts PackageOpts, path string, stk * // The linker loads implicit dependencies. 
if p.Name == "main" && !p.Internal.ForceLibrary { - ldDeps, err := LinkerDeps(p) + ldDeps, err := LinkerDeps(loaderstate, p) if err != nil { setError(err) return @@ -1973,9 +1973,9 @@ func (p *Package) load(ctx context.Context, opts PackageOpts, path string, stk * pkgPath = "command-line-arguments" } if cfg.ModulesEnabled { - p.Module = modload.PackageModuleInfo(ctx, pkgPath) + p.Module = modload.PackageModuleInfo(loaderstate, ctx, pkgPath) } - p.DefaultGODEBUG = defaultGODEBUG(p, nil, nil, nil) + p.DefaultGODEBUG = defaultGODEBUG(loaderstate, p, nil, nil, nil) if !opts.SuppressEmbedFiles { p.EmbedFiles, p.Internal.Embed, err = resolveEmbed(p.Dir, p.EmbedPatterns) @@ -2026,7 +2026,7 @@ func (p *Package) load(ctx context.Context, opts PackageOpts, path string, stk * if path == "C" { continue } - p1, err := loadImport(ctx, opts, nil, path, p.Dir, p, stk, p.Internal.Build.ImportPos[path], ResolveImport) + p1, err := loadImport(loaderstate, ctx, opts, nil, path, p.Dir, p, stk, p.Internal.Build.ImportPos[path], ResolveImport) if err != nil && p.Error == nil { p.Error = err p.Incomplete = true @@ -2635,12 +2635,12 @@ func SafeArg(name string) bool { } // LinkerDeps returns the list of linker-induced dependencies for main package p. -func LinkerDeps(p *Package) ([]string, error) { +func LinkerDeps(s *modload.State, p *Package) ([]string, error) { // Everything links runtime. deps := []string{"runtime"} // External linking mode forces an import of runtime/cgo. - if what := externalLinkingReason(p); what != "" && cfg.BuildContext.Compiler != "gccgo" { + if what := externalLinkingReason(s, p); what != "" && cfg.BuildContext.Compiler != "gccgo" { if !cfg.BuildContext.CgoEnabled { return nil, fmt.Errorf("%s requires external (cgo) linking, but cgo is not enabled", what) } @@ -2673,7 +2673,7 @@ func LinkerDeps(p *Package) ([]string, error) { // externalLinkingReason reports the reason external linking is required // even for programs that do not use cgo, or the empty string if external // linking is not required. -func externalLinkingReason(p *Package) (what string) { +func externalLinkingReason(s *modload.State, p *Package) (what string) { // Some targets must use external linking even inside GOROOT. if platform.MustLinkExternal(cfg.Goos, cfg.Goarch, false) { return cfg.Goos + "/" + cfg.Goarch @@ -2716,7 +2716,7 @@ func externalLinkingReason(p *Package) (what string) { // Using -ldflags=-linkmode=external forces external linking. // If there are multiple -linkmode options, the last one wins. if p != nil { - ldflags := BuildLdflags.For(p) + ldflags := BuildLdflags.For(s, p) for i := len(ldflags) - 1; i >= 0; i-- { a := ldflags[i] if a == "-linkmode=external" || @@ -2797,7 +2797,7 @@ func PackageList(roots []*Package) []*Package { // TestPackageList returns the list of packages in the dag rooted at roots // as visited in a depth-first post-order traversal, including the test // imports of the roots. This ignores errors in test packages. 
-func TestPackageList(ctx context.Context, opts PackageOpts, roots []*Package) []*Package { +func TestPackageList(loaderstate *modload.State, ctx context.Context, opts PackageOpts, roots []*Package) []*Package { seen := map[*Package]bool{} all := []*Package{} var walk func(*Package) @@ -2813,7 +2813,7 @@ func TestPackageList(ctx context.Context, opts PackageOpts, roots []*Package) [] } walkTest := func(root *Package, path string) { var stk ImportStack - p1, err := loadImport(ctx, opts, nil, path, root.Dir, root, &stk, root.Internal.Build.TestImportPos[path], ResolveImport) + p1, err := loadImport(loaderstate, ctx, opts, nil, path, root.Dir, root, &stk, root.Internal.Build.TestImportPos[path], ResolveImport) if err != nil && root.Error == nil { // Assign error importing the package to the importer. root.Error = err @@ -2840,17 +2840,17 @@ func TestPackageList(ctx context.Context, opts PackageOpts, roots []*Package) [] // dependencies (like sync/atomic for coverage). // TODO(jayconrod): delete this function and set flags automatically // in LoadImport instead. -func LoadImportWithFlags(path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) (*Package, *PackageError) { - p, err := loadImport(context.TODO(), PackageOpts{}, nil, path, srcDir, parent, stk, importPos, mode) - setToolFlags(p) +func LoadImportWithFlags(loaderstate *modload.State, path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) (*Package, *PackageError) { + p, err := loadImport(loaderstate, context.TODO(), PackageOpts{}, nil, path, srcDir, parent, stk, importPos, mode) + setToolFlags(loaderstate, p) return p, err } // LoadPackageWithFlags is the same as LoadImportWithFlags but without a parent. // It's then guaranteed to not return an error -func LoadPackageWithFlags(path, srcDir string, stk *ImportStack, importPos []token.Position, mode int) *Package { - p := LoadPackage(context.TODO(), PackageOpts{}, path, srcDir, stk, importPos, mode) - setToolFlags(p) +func LoadPackageWithFlags(loaderstate *modload.State, path, srcDir string, stk *ImportStack, importPos []token.Position, mode int) *Package { + p := LoadPackage(loaderstate, context.TODO(), PackageOpts{}, path, srcDir, stk, importPos, mode) + setToolFlags(loaderstate, p) return p } @@ -2899,7 +2899,7 @@ type PackageOpts struct { // // To obtain a flat list of packages, use PackageList. // To report errors loading packages, use ReportPackageErrors. -func PackagesAndErrors(ctx context.Context, opts PackageOpts, patterns []string) []*Package { +func PackagesAndErrors(loaderstate *modload.State, ctx context.Context, opts PackageOpts, patterns []string) []*Package { ctx, span := trace.StartSpan(ctx, "load.PackagesAndErrors") defer span.Done() @@ -2911,7 +2911,7 @@ func PackagesAndErrors(ctx context.Context, opts PackageOpts, patterns []string) // We need to test whether the path is an actual Go file and not a // package path or pattern ending in '.go' (see golang.org/issue/34653). 
if fi, err := fsys.Stat(p); err == nil && !fi.IsDir() { - pkgs := []*Package{GoFilesPackage(ctx, opts, patterns)} + pkgs := []*Package{GoFilesPackage(loaderstate, ctx, opts, patterns)} setPGOProfilePath(pkgs) return pkgs } @@ -2919,13 +2919,13 @@ func PackagesAndErrors(ctx context.Context, opts PackageOpts, patterns []string) } var matches []*search.Match - if modload.Init(); cfg.ModulesEnabled { + if modload.Init(loaderstate); cfg.ModulesEnabled { modOpts := modload.PackageOpts{ ResolveMissingImports: true, LoadTests: opts.ModResolveTests, SilencePackageErrors: true, } - matches, _ = modload.LoadPackages(ctx, modOpts, patterns...) + matches, _ = modload.LoadPackages(loaderstate, ctx, modOpts, patterns...) } else { matches = search.ImportPaths(patterns) } @@ -2938,7 +2938,7 @@ func PackagesAndErrors(ctx context.Context, opts PackageOpts, patterns []string) pre := newPreload() defer pre.flush() - pre.preloadMatches(ctx, opts, matches) + pre.preloadMatches(loaderstate, ctx, opts, matches) for _, m := range matches { for _, pkg := range m.Pkgs { @@ -2952,7 +2952,7 @@ func PackagesAndErrors(ctx context.Context, opts PackageOpts, patterns []string) // a literal and also a non-literal pattern. mode |= cmdlinePkgLiteral } - p, perr := loadImport(ctx, opts, pre, pkg, base.Cwd(), nil, &stk, nil, mode) + p, perr := loadImport(loaderstate, ctx, opts, pre, pkg, base.Cwd(), nil, &stk, nil, mode) if perr != nil { base.Fatalf("internal error: loadImport of %q with nil parent returned an error", pkg) } @@ -2992,7 +2992,7 @@ func PackagesAndErrors(ctx context.Context, opts PackageOpts, patterns []string) // compute the effective flags for all loaded packages // (not just the ones matching the patterns but also // their dependencies). - setToolFlags(pkgs...) + setToolFlags(loaderstate, pkgs...) setPGOProfilePath(pkgs) @@ -3231,20 +3231,20 @@ func (e *mainPackageError) ImportPath() string { return e.importPath } -func setToolFlags(pkgs ...*Package) { +func setToolFlags(loaderstate *modload.State, pkgs ...*Package) { for _, p := range PackageList(pkgs) { - p.Internal.Asmflags = BuildAsmflags.For(p) - p.Internal.Gcflags = BuildGcflags.For(p) - p.Internal.Ldflags = BuildLdflags.For(p) - p.Internal.Gccgoflags = BuildGccgoflags.For(p) + p.Internal.Asmflags = BuildAsmflags.For(loaderstate, p) + p.Internal.Gcflags = BuildGcflags.For(loaderstate, p) + p.Internal.Ldflags = BuildLdflags.For(loaderstate, p) + p.Internal.Gccgoflags = BuildGccgoflags.For(loaderstate, p) } } // GoFilesPackage creates a package for building a collection of Go files // (typically named on the command line). The target is named p.a for // package p or named after the first Go file for package main. 
-func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Package { - modload.Init() +func GoFilesPackage(loaderstate *modload.State, ctx context.Context, opts PackageOpts, gofiles []string) *Package { + modload.Init(loaderstate) for _, f := range gofiles { if !strings.HasSuffix(f, ".go") { @@ -3289,7 +3289,7 @@ func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Pa ctxt.ReadDir = func(string) ([]fs.FileInfo, error) { return dirent, nil } if cfg.ModulesEnabled { - modload.ImportFromFiles(ctx, gofiles) + modload.ImportFromFiles(loaderstate, ctx, gofiles) } var err error @@ -3305,7 +3305,7 @@ func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Pa pkg := new(Package) pkg.Internal.Local = true pkg.Internal.CmdlineFiles = true - pkg.load(ctx, opts, "command-line-arguments", &stk, nil, bp, err) + pkg.load(loaderstate, ctx, opts, "command-line-arguments", &stk, nil, bp, err) if !cfg.ModulesEnabled { pkg.Internal.LocalPrefix = dirToImportPath(dir) } @@ -3319,7 +3319,7 @@ func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Pa if cfg.GOBIN != "" { pkg.Target = filepath.Join(cfg.GOBIN, exe) } else if cfg.ModulesEnabled { - pkg.Target = filepath.Join(modload.BinDir(), exe) + pkg.Target = filepath.Join(modload.BinDir(loaderstate), exe) } } @@ -3327,7 +3327,7 @@ func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Pa pkg.Error = &PackageError{Err: &mainPackageError{importPath: pkg.ImportPath}} pkg.Incomplete = true } - setToolFlags(pkg) + setToolFlags(loaderstate, pkg) return pkg } @@ -3347,11 +3347,11 @@ func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Pa // module, but its go.mod file (if it has one) must not contain directives that // would cause it to be interpreted differently if it were the main module // (replace, exclude). -func PackagesAndErrorsOutsideModule(ctx context.Context, opts PackageOpts, args []string) ([]*Package, error) { - if !modload.ForceUseModules { +func PackagesAndErrorsOutsideModule(loaderstate *modload.State, ctx context.Context, opts PackageOpts, args []string) ([]*Package, error) { + if !loaderstate.ForceUseModules { panic("modload.ForceUseModules must be true") } - if modload.RootMode != modload.NoRoot { + if loaderstate.RootMode != modload.NoRoot { panic("modload.RootMode must be NoRoot") } @@ -3398,18 +3398,18 @@ func PackagesAndErrorsOutsideModule(ctx context.Context, opts PackageOpts, args // (first result). It's possible this module won't provide packages named by // later arguments, and other modules would. Let's not try to be too // magical though. - allowed := modload.CheckAllowed + allowed := loaderstate.CheckAllowed if modload.IsRevisionQuery(firstPath, version) { // Don't check for retractions if a specific revision is requested. 
allowed = nil } noneSelected := func(path string) (version string) { return "none" } - qrs, err := modload.QueryPackages(ctx, patterns[0], version, noneSelected, allowed) + qrs, err := modload.QueryPackages(loaderstate, ctx, patterns[0], version, noneSelected, allowed) if err != nil { return nil, fmt.Errorf("%s: %w", args[0], err) } rootMod := qrs[0].Mod - deprecation, err := modload.CheckDeprecation(ctx, rootMod) + deprecation, err := modload.CheckDeprecation(loaderstate, ctx, rootMod) if err != nil { return nil, fmt.Errorf("%s: %w", args[0], err) } @@ -3438,12 +3438,12 @@ func PackagesAndErrorsOutsideModule(ctx context.Context, opts PackageOpts, args // Since we are in NoRoot mode, the build list initially contains only // the dummy command-line-arguments module. Add a requirement on the // module that provides the packages named on the command line. - if _, err := modload.EditBuildList(ctx, nil, []module.Version{rootMod}); err != nil { + if _, err := modload.EditBuildList(loaderstate, ctx, nil, []module.Version{rootMod}); err != nil { return nil, fmt.Errorf("%s: %w", args[0], err) } // Load packages for all arguments. - pkgs := PackagesAndErrors(ctx, opts, patterns) + pkgs := PackagesAndErrors(loaderstate, ctx, opts, patterns) // Check that named packages are all provided by the same module. for _, pkg := range pkgs { @@ -3471,14 +3471,14 @@ func PackagesAndErrorsOutsideModule(ctx context.Context, opts PackageOpts, args } // EnsureImport ensures that package p imports the named package. -func EnsureImport(p *Package, pkg string) { +func EnsureImport(s *modload.State, p *Package, pkg string) { for _, d := range p.Internal.Imports { if d.Name == pkg { return } } - p1, err := LoadImportWithFlags(pkg, p.Dir, p, &ImportStack{}, nil, 0) + p1, err := LoadImportWithFlags(s, pkg, p.Dir, p, &ImportStack{}, nil, 0) if err != nil { base.Fatalf("load %s: %v", pkg, err) } @@ -3494,10 +3494,10 @@ func EnsureImport(p *Package, pkg string) { // "go test -cover"). It walks through the packages being built (and // dependencies) and marks them for coverage instrumentation when // appropriate, and possibly adding additional deps where needed. -func PrepareForCoverageBuild(pkgs []*Package) { - var match []func(*Package) bool +func PrepareForCoverageBuild(s *modload.State, pkgs []*Package) { + var match []func(*modload.State, *Package) bool - matchMainModAndCommandLine := func(p *Package) bool { + matchMainModAndCommandLine := func(_ *modload.State, p *Package) bool { // note that p.Standard implies p.Module == nil below. return p.Internal.CmdlineFiles || p.Internal.CmdlinePkg || (p.Module != nil && p.Module.Main) } @@ -3505,7 +3505,7 @@ func PrepareForCoverageBuild(pkgs []*Package) { if len(cfg.BuildCoverPkg) != 0 { // If -coverpkg has been specified, then we instrument only // the specific packages selected by the user-specified pattern(s). - match = make([]func(*Package) bool, len(cfg.BuildCoverPkg)) + match = make([]func(*modload.State, *Package) bool, len(cfg.BuildCoverPkg)) for i := range cfg.BuildCoverPkg { match[i] = MatchPackage(cfg.BuildCoverPkg[i], base.Cwd()) } @@ -3513,16 +3513,16 @@ func PrepareForCoverageBuild(pkgs []*Package) { // Without -coverpkg, instrument only packages in the main module // (if any), as well as packages/files specifically named on the // command line. 
- match = []func(*Package) bool{matchMainModAndCommandLine} + match = []func(*modload.State, *Package) bool{matchMainModAndCommandLine} } // Visit the packages being built or installed, along with all of // their dependencies, and mark them to be instrumented, taking // into account the matchers we've set up in the sequence above. - SelectCoverPackages(PackageList(pkgs), match, "build") + SelectCoverPackages(s, PackageList(pkgs), match, "build") } -func SelectCoverPackages(roots []*Package, match []func(*Package) bool, op string) []*Package { +func SelectCoverPackages(s *modload.State, roots []*Package, match []func(*modload.State, *Package) bool, op string) []*Package { var warntag string var includeMain bool switch op { @@ -3540,7 +3540,7 @@ func SelectCoverPackages(roots []*Package, match []func(*Package) bool, op strin for _, p := range roots { haveMatch := false for i := range match { - if match[i](p) { + if match[i](s, p) { matched[i] = true haveMatch = true } @@ -3602,7 +3602,7 @@ func SelectCoverPackages(roots []*Package, match []func(*Package) bool, op strin // Force import of sync/atomic into package if atomic mode. if cfg.BuildCoverMode == "atomic" { - EnsureImport(p, "sync/atomic") + EnsureImport(s, p, "sync/atomic") } } diff --git a/src/cmd/go/internal/load/search.go b/src/cmd/go/internal/load/search.go index 941cfb77a2e..749a00e8485 100644 --- a/src/cmd/go/internal/load/search.go +++ b/src/cmd/go/internal/load/search.go @@ -14,7 +14,7 @@ import ( ) // MatchPackage(pattern, cwd)(p) reports whether package p matches pattern in the working directory cwd. -func MatchPackage(pattern, cwd string) func(*Package) bool { +func MatchPackage(pattern, cwd string) func(*modload.State, *Package) bool { switch { case search.IsRelativePath(pattern): // Split pattern into leading pattern-free directory path @@ -29,10 +29,10 @@ func MatchPackage(pattern, cwd string) func(*Package) bool { } dir = filepath.Join(cwd, dir) if pattern == "" { - return func(p *Package) bool { return p.Dir == dir } + return func(_ *modload.State, p *Package) bool { return p.Dir == dir } } matchPath := pkgpattern.MatchPattern(pattern) - return func(p *Package) bool { + return func(_ *modload.State, p *Package) bool { // Compute relative path to dir and see if it matches the pattern. rel, err := filepath.Rel(dir, p.Dir) if err != nil { @@ -49,22 +49,22 @@ func MatchPackage(pattern, cwd string) func(*Package) bool { // This is slightly inaccurate: it matches every package, which isn't the same // as matching the "all" package pattern. // TODO(matloob): Should we make this more accurate? Does anyone depend on this behavior? 
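MatchPackage, reshaped above so the "tool" and "work" cases consult the passed-in state, still falls back to pkgpattern.MatchPattern for ordinary patterns. A self-contained approximation of that wildcard rule (the real matcher has additional special cases):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// matchPattern reports whether name matches pattern, where "..." means
// "any string" and "foo/..." also matches "foo" itself.
func matchPattern(pattern string) func(string) bool {
	re := regexp.QuoteMeta(pattern)
	re = strings.ReplaceAll(re, `\.\.\.`, `.*`)
	if strings.HasSuffix(re, `/.*`) {
		re = re[:len(re)-len(`/.*`)] + `(/.*)?`
	}
	rx := regexp.MustCompile(`^(?:` + re + `)$`)
	return func(name string) bool { return rx.MatchString(name) }
}

func main() {
	m := matchPattern("net/...")
	fmt.Println(m("net"), m("net/http"), m("fmt")) // true true false
}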
- return func(p *Package) bool { return true } + return func(_ *modload.State, p *Package) bool { return true } case pattern == "std": - return func(p *Package) bool { return p.Standard } + return func(_ *modload.State, p *Package) bool { return p.Standard } case pattern == "cmd": - return func(p *Package) bool { return p.Standard && strings.HasPrefix(p.ImportPath, "cmd/") } - case pattern == "tool" && modload.Enabled(): - return func(p *Package) bool { - return modload.MainModules.Tools()[p.ImportPath] - } - case pattern == "work" && modload.Enabled(): - return func(p *Package) bool { - return p.Module != nil && modload.MainModules.Contains(p.Module.Path) - } - + return func(_ *modload.State, p *Package) bool { return p.Standard && strings.HasPrefix(p.ImportPath, "cmd/") } default: - matchPath := pkgpattern.MatchPattern(pattern) - return func(p *Package) bool { return matchPath(p.ImportPath) } + return func(s *modload.State, p *Package) bool { + switch { + case pattern == "tool" && s.Enabled(): + return s.MainModules.Tools()[p.ImportPath] + case pattern == "work" && s.Enabled(): + return p.Module != nil && s.MainModules.Contains(p.Module.Path) + default: + matchPath := pkgpattern.MatchPattern(pattern) + return matchPath(p.ImportPath) + } + } } } diff --git a/src/cmd/go/internal/load/test.go b/src/cmd/go/internal/load/test.go index f895e3a2461..c7c58fd5487 100644 --- a/src/cmd/go/internal/load/test.go +++ b/src/cmd/go/internal/load/test.go @@ -24,6 +24,7 @@ import ( "unicode/utf8" "cmd/go/internal/fsys" + "cmd/go/internal/modload" "cmd/go/internal/str" "cmd/go/internal/trace" ) @@ -47,8 +48,8 @@ type TestCover struct { // the package containing an error if the test packages or // their dependencies have errors. // Only test packages without errors are returned. -func TestPackagesFor(ctx context.Context, opts PackageOpts, p *Package, cover *TestCover) (pmain, ptest, pxtest, perr *Package) { - pmain, ptest, pxtest = TestPackagesAndErrors(ctx, nil, opts, p, cover) +func TestPackagesFor(loaderstate *modload.State, ctx context.Context, opts PackageOpts, p *Package, cover *TestCover) (pmain, ptest, pxtest, perr *Package) { + pmain, ptest, pxtest = TestPackagesAndErrors(loaderstate, ctx, nil, opts, p, cover) for _, p1 := range []*Package{ptest, pxtest, pmain} { if p1 == nil { // pxtest may be nil @@ -98,7 +99,7 @@ func TestPackagesFor(ctx context.Context, opts PackageOpts, p *Package, cover *T // // The caller is expected to have checked that len(p.TestGoFiles)+len(p.XTestGoFiles) > 0, // or else there's no point in any of this. -func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p *Package, cover *TestCover) (pmain, ptest, pxtest *Package) { +func TestPackagesAndErrors(loaderstate *modload.State, ctx context.Context, done func(), opts PackageOpts, p *Package, cover *TestCover) (pmain, ptest, pxtest *Package) { ctx, span := trace.StartSpan(ctx, "load.TestPackagesAndErrors") defer span.Done() @@ -106,7 +107,7 @@ func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p defer pre.flush() allImports := append([]string{}, p.TestImports...) allImports = append(allImports, p.XTestImports...) 
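Aside: the code above gathers imports from both halves of a test: TestImports come from _test.go files in the package itself, XTestImports from files in the external p_test package. A tiny self-contained illustration of the concatenation it performs (the Package type here is a stand-in for the cmd/go one):

package main

import "fmt"

type Package struct {
	TestImports  []string // imports of the _test.go files in package p itself
	XTestImports []string // imports of the _test.go files in package p_test
}

func main() {
	p := &Package{
		TestImports:  []string{"testing", "example.com/m/internal/helper"},
		XTestImports: []string{"testing", "example.com/m"},
	}
	// The generated test main package must preload every import that either
	// test package mentions, so the two lists are simply concatenated; the
	// loader deduplicates them when it resolves the paths.
	allImports := append([]string{}, p.TestImports...)
	allImports = append(allImports, p.XTestImports...)
	fmt.Println(allImports)
}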
- pre.preloadImports(ctx, opts, allImports, p.Internal.Build) + pre.preloadImports(loaderstate, ctx, opts, allImports, p.Internal.Build) var ptestErr, pxtestErr *PackageError var imports, ximports []*Package @@ -116,7 +117,7 @@ func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p stk.Push(ImportInfo{Pkg: p.ImportPath + " (test)"}) rawTestImports := str.StringList(p.TestImports) for i, path := range p.TestImports { - p1, err := loadImport(ctx, opts, pre, path, p.Dir, p, &stk, p.Internal.Build.TestImportPos[path], ResolveImport) + p1, err := loadImport(loaderstate, ctx, opts, pre, path, p.Dir, p, &stk, p.Internal.Build.TestImportPos[path], ResolveImport) if err != nil && ptestErr == nil { ptestErr = err incomplete = true @@ -145,7 +146,7 @@ func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p var pxtestIncomplete bool rawXTestImports := str.StringList(p.XTestImports) for i, path := range p.XTestImports { - p1, err := loadImport(ctx, opts, pre, path, p.Dir, p, &stk, p.Internal.Build.XTestImportPos[path], ResolveImport) + p1, err := loadImport(loaderstate, ctx, opts, pre, path, p.Dir, p, &stk, p.Internal.Build.XTestImportPos[path], ResolveImport) if err != nil && pxtestErr == nil { pxtestErr = err } @@ -292,7 +293,7 @@ func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p } pb := p.Internal.Build - pmain.DefaultGODEBUG = defaultGODEBUG(pmain, pb.Directives, pb.TestDirectives, pb.XTestDirectives) + pmain.DefaultGODEBUG = defaultGODEBUG(loaderstate, pmain, pb.Directives, pb.TestDirectives, pb.XTestDirectives) if pmain.Internal.BuildInfo == nil || pmain.DefaultGODEBUG != p.DefaultGODEBUG { // Either we didn't generate build info for the package under test (because it wasn't package main), or // the DefaultGODEBUG used to build the test main package is different from the DefaultGODEBUG @@ -310,7 +311,7 @@ func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p if cover != nil { deps = append(deps, "internal/coverage/cfile") } - ldDeps, err := LinkerDeps(p) + ldDeps, err := LinkerDeps(loaderstate, p) if err != nil && pmain.Error == nil { pmain.Error = &PackageError{Err: err} } @@ -321,7 +322,7 @@ func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p if dep == ptest.ImportPath { pmain.Internal.Imports = append(pmain.Internal.Imports, ptest) } else { - p1, err := loadImport(ctx, opts, pre, dep, "", nil, &stk, nil, 0) + p1, err := loadImport(loaderstate, ctx, opts, pre, dep, "", nil, &stk, nil, 0) if err != nil && pmain.Error == nil { pmain.Error = err pmain.Incomplete = true @@ -336,7 +337,7 @@ func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p allTestImports = append(allTestImports, pmain.Internal.Imports...) allTestImports = append(allTestImports, imports...) allTestImports = append(allTestImports, ximports...) - setToolFlags(allTestImports...) + setToolFlags(loaderstate, allTestImports...) // Do initial scan for metadata needed for writing _testmain.go // Use that metadata to update the list of imports for package main. @@ -649,6 +650,14 @@ func (t *testFuncs) ImportPath() string { return pkg } +func (t *testFuncs) ModulePath() string { + m := t.Package.Module + if m == nil { + return "" + } + return m.Path +} + // Covered returns a string describing which packages are being tested for coverage. // If the covered package is the same as the tested package, it returns the empty string. 
// Otherwise it is a comma-separated human-readable list of packages beginning with @@ -836,6 +845,7 @@ func init() { testdeps.CoverMarkProfileEmittedFunc = cfile.MarkProfileEmitted {{end}} + testdeps.ModulePath = {{.ModulePath | printf "%q"}} testdeps.ImportPath = {{.ImportPath | printf "%q"}} } diff --git a/src/cmd/go/internal/lockedfile/lockedfile.go b/src/cmd/go/internal/lockedfile/lockedfile.go index 8bd2ffbe8f7..f48124ffbc0 100644 --- a/src/cmd/go/internal/lockedfile/lockedfile.go +++ b/src/cmd/go/internal/lockedfile/lockedfile.go @@ -94,6 +94,11 @@ func (f *File) Close() error { err := closeFile(f.osFile.File) f.cleanup.Stop() + // f may be dead at the moment after we access f.cleanup, + // so the cleanup can fire before Stop completes. Keep f + // alive while we call Stop. See the documentation for + // runtime.Cleanup.Stop. + runtime.KeepAlive(f) return err } diff --git a/src/cmd/go/internal/modcmd/download.go b/src/cmd/go/internal/modcmd/download.go index 2f4feae8f25..150d0c88607 100644 --- a/src/cmd/go/internal/modcmd/download.go +++ b/src/cmd/go/internal/modcmd/download.go @@ -109,18 +109,19 @@ type ModuleJSON struct { } func runDownload(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() + moduleLoaderState := modload.NewState() + moduleLoaderState.InitWorkfile() // Check whether modules are enabled and whether we're in a module. - modload.ForceUseModules = true + moduleLoaderState.ForceUseModules = true modload.ExplicitWriteGoMod = true haveExplicitArgs := len(args) > 0 - if modload.HasModRoot() || modload.WorkFilePath() != "" { - modload.LoadModFile(ctx) // to fill MainModules + if moduleLoaderState.HasModRoot() || modload.WorkFilePath(moduleLoaderState) != "" { + modload.LoadModFile(moduleLoaderState, ctx) // to fill MainModules if haveExplicitArgs { - for _, mainModule := range modload.MainModules.Versions() { + for _, mainModule := range moduleLoaderState.MainModules.Versions() { targetAtUpgrade := mainModule.Path + "@upgrade" targetAtPatch := mainModule.Path + "@patch" for _, arg := range args { @@ -130,14 +131,14 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { } } } - } else if modload.WorkFilePath() != "" { + } else if modload.WorkFilePath(moduleLoaderState) != "" { // TODO(#44435): Think about what the correct query is to download the // right set of modules. Also see code review comment at // https://go-review.googlesource.com/c/go/+/359794/comments/ce946a80_6cf53992. args = []string{"all"} } else { - mainModule := modload.MainModules.Versions()[0] - modFile := modload.MainModules.ModFile(mainModule) + mainModule := moduleLoaderState.MainModules.Versions()[0] + modFile := moduleLoaderState.MainModules.ModFile(mainModule) if modFile.Go == nil || gover.Compare(modFile.Go.Version, gover.ExplicitIndirectVersion) < 0 { if len(modFile.Require) > 0 { args = []string{"all"} @@ -153,12 +154,12 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { // However, we also need to load the full module graph, to ensure that // we have downloaded enough of the module graph to run 'go list all', // 'go mod graph', and similar commands. - _, err := modload.LoadModGraph(ctx, "") + _, err := modload.LoadModGraph(moduleLoaderState, ctx, "") if err != nil { // TODO(#64008): call base.Fatalf instead of toolchain.SwitchOrFatal // here, since we can only reach this point with an outdated toolchain // if the go.mod file is inconsistent. 
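Aside: the lockedfile.Close hunk above relies on the runtime.AddCleanup/Cleanup.Stop API added in Go 1.24. A minimal, self-contained sketch of the same Stop-then-KeepAlive idiom (a hypothetical wrapper type, not the lockedfile implementation):

package main

import (
	"fmt"
	"os"
	"runtime"
)

// File pairs an *os.File with a cleanup that closes the descriptor if the
// caller forgets to call Close.
type File struct {
	osFile  *os.File
	cleanup runtime.Cleanup
}

func Open(name string) (*File, error) {
	osf, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	f := &File{osFile: osf}
	f.cleanup = runtime.AddCleanup(f, func(of *os.File) { of.Close() }, osf)
	return f, nil
}

func (f *File) Close() error {
	err := f.osFile.Close()
	f.cleanup.Stop()
	// f may already be unreachable once f.cleanup has been read, so the
	// cleanup could otherwise fire before Stop completes; keep f alive.
	runtime.KeepAlive(f)
	return err
}

func main() {
	f, err := Open(os.DevNull)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(f.Close())
}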
- toolchain.SwitchOrFatal(ctx, err) + toolchain.SwitchOrFatal(moduleLoaderState, ctx, err) } for _, m := range modFile.Require { @@ -169,7 +170,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { } if len(args) == 0 { - if modload.HasModRoot() { + if moduleLoaderState.HasModRoot() { os.Stderr.WriteString("go: no module dependencies to download\n") } else { base.Errorf("go: no modules specified (see 'go help mod download')") @@ -177,14 +178,14 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { base.Exit() } - if *downloadReuse != "" && modload.HasModRoot() { + if *downloadReuse != "" && moduleLoaderState.HasModRoot() { base.Fatalf("go mod download -reuse cannot be used inside a module") } var mods []*ModuleJSON type token struct{} sem := make(chan token, runtime.GOMAXPROCS(0)) - infos, infosErr := modload.ListModules(ctx, args, 0, *downloadReuse) + infos, infosErr := modload.ListModules(moduleLoaderState, ctx, args, 0, *downloadReuse) // There is a bit of a chicken-and-egg problem here: ideally we need to know // which Go version to switch to download the requested modules, but if we @@ -211,7 +212,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { // toolchain version) or only one module (as is used by the Go Module Proxy). if infosErr != nil { - var sw toolchain.Switcher + sw := toolchain.NewSwitcher(moduleLoaderState) sw.Error(infosErr) if sw.NeedSwitch() { sw.Switch(ctx) @@ -220,7 +221,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { // when we can. } - if !haveExplicitArgs && modload.WorkFilePath() == "" { + if !haveExplicitArgs && modload.WorkFilePath(moduleLoaderState) == "" { // 'go mod download' is sometimes run without arguments to pre-populate the // module cache. In modules that aren't at go 1.17 or higher, it may fetch // modules that aren't needed to build packages in the main module. This is @@ -231,7 +232,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { // TODO(#64008): In the future, report an error if go.mod or go.sum need to // be updated after loading the build list. This may require setting // the mode to "mod" or "readonly" depending on haveExplicitArgs. - if err := modload.WriteGoMod(ctx, modload.WriteOpts{}); err != nil { + if err := modload.WriteGoMod(moduleLoaderState, ctx, modload.WriteOpts{}); err != nil { base.Fatal(err) } } @@ -291,8 +292,8 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { // with no arguments we download the module pattern "all", // which may include dependencies that are normally pruned out // of the individual modules in the workspace. - if haveExplicitArgs || modload.WorkFilePath() != "" { - var sw toolchain.Switcher + if haveExplicitArgs || modload.WorkFilePath(moduleLoaderState) != "" { + sw := toolchain.NewSwitcher(moduleLoaderState) // Add errors to the Switcher in deterministic order so that they will be // logged deterministically. for _, m := range mods { @@ -347,8 +348,8 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { // // Don't save sums for 'go mod download' without arguments unless we're in // workspace mode; see comment above. 
- if haveExplicitArgs || modload.WorkFilePath() != "" { - if err := modload.WriteGoMod(ctx, modload.WriteOpts{}); err != nil { + if haveExplicitArgs || modload.WorkFilePath(moduleLoaderState) != "" { + if err := modload.WriteGoMod(moduleLoaderState, ctx, modload.WriteOpts{}); err != nil { base.Error(err) } } diff --git a/src/cmd/go/internal/modcmd/edit.go b/src/cmd/go/internal/modcmd/edit.go index 041b4432bfd..69ebb14813b 100644 --- a/src/cmd/go/internal/modcmd/edit.go +++ b/src/cmd/go/internal/modcmd/edit.go @@ -209,6 +209,7 @@ func init() { } func runEdit(ctx context.Context, cmd *base.Command, args []string) { + moduleLoaderState := modload.NewState() anyFlags := *editModule != "" || *editGo != "" || *editToolchain != "" || @@ -232,7 +233,7 @@ func runEdit(ctx context.Context, cmd *base.Command, args []string) { if len(args) == 1 { gomod = args[0] } else { - gomod = modload.ModFilePath() + gomod = moduleLoaderState.ModFilePath() } if *editModule != "" { @@ -583,8 +584,9 @@ func flagDropIgnore(arg string) { // fileJSON is the -json output data structure. type fileJSON struct { Module editModuleJSON - Go string `json:",omitempty"` - Toolchain string `json:",omitempty"` + Go string `json:",omitempty"` + Toolchain string `json:",omitempty"` + GoDebug []debugJSON `json:",omitempty"` Require []requireJSON Exclude []module.Version Replace []replaceJSON @@ -598,6 +600,11 @@ type editModuleJSON struct { Deprecated string `json:",omitempty"` } +type debugJSON struct { + Key string + Value string +} + type requireJSON struct { Path string Version string `json:",omitempty"` @@ -656,6 +663,9 @@ func editPrintJSON(modFile *modfile.File) { for _, i := range modFile.Ignore { f.Ignore = append(f.Ignore, ignoreJSON{i.Path}) } + for _, d := range modFile.Godebug { + f.GoDebug = append(f.GoDebug, debugJSON{d.Key, d.Value}) + } data, err := json.MarshalIndent(&f, "", "\t") if err != nil { base.Fatalf("go: internal error: %v", err) diff --git a/src/cmd/go/internal/modcmd/graph.go b/src/cmd/go/internal/modcmd/graph.go index 172c1dda5ce..307c6ee4b56 100644 --- a/src/cmd/go/internal/modcmd/graph.go +++ b/src/cmd/go/internal/modcmd/graph.go @@ -52,23 +52,24 @@ func init() { } func runGraph(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() + moduleLoaderState := modload.NewState() + moduleLoaderState.InitWorkfile() if len(args) > 0 { base.Fatalf("go: 'go mod graph' accepts no arguments") } - modload.ForceUseModules = true - modload.RootMode = modload.NeedRoot + moduleLoaderState.ForceUseModules = true + moduleLoaderState.RootMode = modload.NeedRoot goVersion := graphGo.String() if goVersion != "" && gover.Compare(gover.Local(), goVersion) < 0 { - toolchain.SwitchOrFatal(ctx, &gover.TooNewError{ + toolchain.SwitchOrFatal(moduleLoaderState, ctx, &gover.TooNewError{ What: "-go flag", GoVersion: goVersion, }) } - mg, err := modload.LoadModGraph(ctx, goVersion) + mg, err := modload.LoadModGraph(moduleLoaderState, ctx, goVersion) if err != nil { base.Fatal(err) } diff --git a/src/cmd/go/internal/modcmd/init.go b/src/cmd/go/internal/modcmd/init.go index 356a0569913..e8db3d005f7 100644 --- a/src/cmd/go/internal/modcmd/init.go +++ b/src/cmd/go/internal/modcmd/init.go @@ -35,6 +35,7 @@ func init() { } func runInit(ctx context.Context, cmd *base.Command, args []string) { + moduleLoaderState := modload.NewState() if len(args) > 1 { base.Fatalf("go: 'go mod init' accepts at most one argument") } @@ -43,6 +44,6 @@ func runInit(ctx context.Context, cmd *base.Command, args []string) { modPath = 
args[0] } - modload.ForceUseModules = true - modload.CreateModFile(ctx, modPath) // does all the hard work + moduleLoaderState.ForceUseModules = true + modload.CreateModFile(moduleLoaderState, ctx, modPath) // does all the hard work } diff --git a/src/cmd/go/internal/modcmd/tidy.go b/src/cmd/go/internal/modcmd/tidy.go index 2efa33a7c34..3ac0109625e 100644 --- a/src/cmd/go/internal/modcmd/tidy.go +++ b/src/cmd/go/internal/modcmd/tidy.go @@ -105,6 +105,7 @@ func (f *goVersionFlag) Set(s string) error { } func runTidy(ctx context.Context, cmd *base.Command, args []string) { + moduleLoaderState := modload.NewState() if len(args) > 0 { base.Fatalf("go: 'go mod tidy' accepts no arguments") } @@ -119,18 +120,18 @@ func runTidy(ctx context.Context, cmd *base.Command, args []string) { // those packages. In order to make 'go test' reproducible for the packages // that are in 'all' but outside of the main module, we must explicitly // request that their test dependencies be included. - modload.ForceUseModules = true - modload.RootMode = modload.NeedRoot + moduleLoaderState.ForceUseModules = true + moduleLoaderState.RootMode = modload.NeedRoot goVersion := tidyGo.String() if goVersion != "" && gover.Compare(gover.Local(), goVersion) < 0 { - toolchain.SwitchOrFatal(ctx, &gover.TooNewError{ + toolchain.SwitchOrFatal(moduleLoaderState, ctx, &gover.TooNewError{ What: "-go flag", GoVersion: goVersion, }) } - modload.LoadPackages(ctx, modload.PackageOpts{ + modload.LoadPackages(moduleLoaderState, ctx, modload.PackageOpts{ TidyGoVersion: tidyGo.String(), Tags: imports.AnyTags(), Tidy: true, @@ -141,6 +142,6 @@ func runTidy(ctx context.Context, cmd *base.Command, args []string) { LoadTests: true, AllowErrors: tidyE, SilenceMissingStdImports: true, - Switcher: new(toolchain.Switcher), + Switcher: toolchain.NewSwitcher(moduleLoaderState), }, "all") } diff --git a/src/cmd/go/internal/modcmd/vendor.go b/src/cmd/go/internal/modcmd/vendor.go index e1a9081a95f..5782f4e7944 100644 --- a/src/cmd/go/internal/modcmd/vendor.go +++ b/src/cmd/go/internal/modcmd/vendor.go @@ -66,19 +66,20 @@ func init() { } func runVendor(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() - if modload.WorkFilePath() != "" { + moduleLoaderState := modload.NewState() + moduleLoaderState.InitWorkfile() + if modload.WorkFilePath(moduleLoaderState) != "" { base.Fatalf("go: 'go mod vendor' cannot be run in workspace mode. 
Run 'go work vendor' to vendor the workspace or set 'GOWORK=off' to exit workspace mode.") } - RunVendor(ctx, vendorE, vendorO, args) + RunVendor(moduleLoaderState, ctx, vendorE, vendorO, args) } -func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) { +func RunVendor(loaderstate *modload.State, ctx context.Context, vendorE bool, vendorO string, args []string) { if len(args) != 0 { base.Fatalf("go: 'go mod vendor' accepts no arguments") } - modload.ForceUseModules = true - modload.RootMode = modload.NeedRoot + loaderstate.ForceUseModules = true + loaderstate.RootMode = modload.NeedRoot loadOpts := modload.PackageOpts{ Tags: imports.AnyTags(), @@ -88,7 +89,7 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) AllowErrors: vendorE, SilenceMissingStdImports: true, } - _, pkgs := modload.LoadPackages(ctx, loadOpts, "all") + _, pkgs := modload.LoadPackages(loaderstate, ctx, loadOpts, "all") var vdir string switch { @@ -97,7 +98,7 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) case vendorO != "": vdir = filepath.Join(base.Cwd(), vendorO) default: - vdir = filepath.Join(modload.VendorDir()) + vdir = filepath.Join(modload.VendorDir(loaderstate)) } if err := os.RemoveAll(vdir); err != nil { base.Fatal(err) @@ -106,7 +107,7 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) modpkgs := make(map[module.Version][]string) for _, pkg := range pkgs { m := modload.PackageModule(pkg) - if m.Path == "" || modload.MainModules.Contains(m.Path) { + if m.Path == "" || loaderstate.MainModules.Contains(m.Path) { continue } modpkgs[m] = append(modpkgs[m], pkg) @@ -116,13 +117,13 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) includeAllReplacements := false includeGoVersions := false isExplicit := map[module.Version]bool{} - gv := modload.MainModules.GoVersion() - if gover.Compare(gv, "1.14") >= 0 && (modload.FindGoWork(base.Cwd()) != "" || modload.ModFile().Go != nil) { + gv := loaderstate.MainModules.GoVersion(loaderstate) + if gover.Compare(gv, "1.14") >= 0 && (loaderstate.FindGoWork(base.Cwd()) != "" || modload.ModFile(loaderstate).Go != nil) { // If the Go version is at least 1.14, annotate all explicit 'require' and // 'replace' targets found in the go.mod file so that we can perform a // stronger consistency check when -mod=vendor is set. 
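Aside: the isExplicit annotation decided above surfaces in vendor/modules.txt as "##" marker lines under each "#" module line. The sketch below only illustrates the shape of that output; the exact format is owned by cmd/go:

package main

import "fmt"

func main() {
	type mod struct {
		path, version, goVersion string
		explicit                 bool
	}
	for _, m := range []mod{
		{"example.com/a", "v1.2.3", "1.21", true},
		{"example.com/b", "v0.9.0", "", false},
	} {
		fmt.Printf("# %s %s\n", m.path, m.version)
		switch {
		case m.explicit && m.goVersion != "":
			fmt.Printf("## explicit; go %s\n", m.goVersion)
		case m.explicit:
			fmt.Println("## explicit")
		case m.goVersion != "":
			fmt.Printf("## go %s\n", m.goVersion)
		}
	}
}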
- for _, m := range modload.MainModules.Versions() { - if modFile := modload.MainModules.ModFile(m); modFile != nil { + for _, m := range loaderstate.MainModules.Versions() { + if modFile := loaderstate.MainModules.ModFile(m); modFile != nil { for _, r := range modFile.Require { isExplicit[r.Mod] = true } @@ -156,20 +157,20 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) w = io.MultiWriter(&buf, os.Stderr) } - if modload.MainModules.WorkFile() != nil { + if loaderstate.MainModules.WorkFile() != nil { fmt.Fprintf(w, "## workspace\n") } replacementWritten := make(map[module.Version]bool) for _, m := range vendorMods { - replacement := modload.Replacement(m) + replacement := modload.Replacement(loaderstate, m) line := moduleLine(m, replacement) replacementWritten[m] = true io.WriteString(w, line) goVersion := "" if includeGoVersions { - goVersion = modload.ModuleInfo(ctx, m.Path).GoVersion + goVersion = modload.ModuleInfo(loaderstate, ctx, m.Path).GoVersion } switch { case isExplicit[m] && goVersion != "": @@ -184,7 +185,7 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) sort.Strings(pkgs) for _, pkg := range pkgs { fmt.Fprintf(w, "%s\n", pkg) - vendorPkg(vdir, pkg) + vendorPkg(loaderstate, vdir, pkg) } } @@ -192,8 +193,8 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) // Record unused and wildcard replacements at the end of the modules.txt file: // without access to the complete build list, the consumer of the vendor // directory can't otherwise determine that those replacements had no effect. - for _, m := range modload.MainModules.Versions() { - if workFile := modload.MainModules.WorkFile(); workFile != nil { + for _, m := range loaderstate.MainModules.Versions() { + if workFile := loaderstate.MainModules.WorkFile(); workFile != nil { for _, r := range workFile.Replace { if replacementWritten[r.Old] { // We already recorded this replacement. @@ -208,14 +209,14 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) } } } - if modFile := modload.MainModules.ModFile(m); modFile != nil { + if modFile := loaderstate.MainModules.ModFile(m); modFile != nil { for _, r := range modFile.Replace { if replacementWritten[r.Old] { // We already recorded this replacement. continue } replacementWritten[r.Old] = true - rNew := modload.Replacement(r.Old) + rNew := modload.Replacement(loaderstate, r.Old) if rNew == (module.Version{}) { // There is no replacement. Don't try to write it. 
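Aside: the loops above first record every replacement written next to its vendored module, then append the leftover (unused or wildcard) replace directives so consumers of the vendor tree can still see them. A sketch of that bookkeeping with stand-in types:

package main

import "fmt"

type version struct{ Path, Version string }

type replace struct{ Old, New version }

func main() {
	replaces := []replace{
		{version{"example.com/a", "v1.0.0"}, version{"example.com/a-fork", "v1.0.1"}},
		{version{"example.com/b", "v2.0.0"}, version{"example.com/b-fork", "v2.1.0"}},
	}
	// Replacements already emitted alongside a vendored module.
	written := map[version]bool{
		{"example.com/a", "v1.0.0"}: true,
	}
	// Iterate the slice, not a map, so the trailing records stay deterministic.
	for _, r := range replaces {
		if written[r.Old] {
			continue // already recorded with its module
		}
		written[r.Old] = true
		fmt.Printf("# %s %s => %s %s\n", r.Old.Path, r.Old.Version, r.New.Path, r.New.Version)
	}
}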
continue @@ -268,8 +269,8 @@ func moduleLine(m, r module.Version) string { return b.String() } -func vendorPkg(vdir, pkg string) { - src, realPath, _ := modload.Lookup("", false, pkg) +func vendorPkg(s *modload.State, vdir, pkg string) { + src, realPath, _ := modload.Lookup(s, "", false, pkg) if src == "" { base.Errorf("internal error: no pkg for %s\n", pkg) return @@ -288,7 +289,11 @@ func vendorPkg(vdir, pkg string) { copiedFiles := make(map[string]bool) dst := filepath.Join(vdir, pkg) - copyDir(dst, src, matchPotentialSourceFile, copiedFiles) + matcher := func(dir string, info fs.DirEntry) bool { + goVersion := s.MainModules.GoVersion(s) + return matchPotentialSourceFile(dir, info, goVersion) + } + copyDir(dst, src, matcher, copiedFiles) if m := modload.PackageModule(realPath); m.Path != "" { copyMetadata(m.Path, realPath, dst, src, copiedFiles) } @@ -315,7 +320,7 @@ func vendorPkg(vdir, pkg string) { } } var embedPatterns []string - if gover.Compare(modload.MainModules.GoVersion(), "1.22") >= 0 { + if gover.Compare(s.MainModules.GoVersion(s), "1.22") >= 0 { embedPatterns = bp.EmbedPatterns } else { // Maintain the behavior of https://github.com/golang/go/issues/63473 @@ -426,12 +431,12 @@ func matchMetadata(dir string, info fs.DirEntry) bool { } // matchPotentialSourceFile reports whether info may be relevant to a build operation. -func matchPotentialSourceFile(dir string, info fs.DirEntry) bool { +func matchPotentialSourceFile(dir string, info fs.DirEntry, goVersion string) bool { if strings.HasSuffix(info.Name(), "_test.go") { return false } if info.Name() == "go.mod" || info.Name() == "go.sum" { - if gv := modload.MainModules.GoVersion(); gover.Compare(gv, "1.17") >= 0 { + if gover.Compare(goVersion, "1.17") >= 0 { // As of Go 1.17, we strip go.mod and go.sum files from dependency modules. // Otherwise, 'go' commands invoked within the vendor subtree may misidentify // an arbitrary directory within the vendor tree as a module root. diff --git a/src/cmd/go/internal/modcmd/verify.go b/src/cmd/go/internal/modcmd/verify.go index d07f730c5d0..d654ba26a4b 100644 --- a/src/cmd/go/internal/modcmd/verify.go +++ b/src/cmd/go/internal/modcmd/verify.go @@ -44,20 +44,21 @@ func init() { } func runVerify(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() + moduleLoaderState := modload.NewState() + moduleLoaderState.InitWorkfile() if len(args) != 0 { // NOTE(rsc): Could take a module pattern. base.Fatalf("go: verify takes no arguments") } - modload.ForceUseModules = true - modload.RootMode = modload.NeedRoot + moduleLoaderState.ForceUseModules = true + moduleLoaderState.RootMode = modload.NeedRoot // Only verify up to GOMAXPROCS zips at once. type token struct{} sem := make(chan token, runtime.GOMAXPROCS(0)) - mg, err := modload.LoadModGraph(ctx, "") + mg, err := modload.LoadModGraph(moduleLoaderState, ctx, "") if err != nil { base.Fatal(err) } @@ -71,7 +72,7 @@ func runVerify(ctx context.Context, cmd *base.Command, args []string) { errsChans[i] = errsc mod := mod // use a copy to avoid data races go func() { - errsc <- verifyMod(ctx, mod) + errsc <- verifyMod(moduleLoaderState, ctx, mod) <-sem }() } @@ -89,12 +90,12 @@ func runVerify(ctx context.Context, cmd *base.Command, args []string) { } } -func verifyMod(ctx context.Context, mod module.Version) []error { +func verifyMod(loaderstate *modload.State, ctx context.Context, mod module.Version) []error { if gover.IsToolchain(mod.Path) { // "go" and "toolchain" have no disk footprint; nothing to verify. 
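Aside: vendorPkg above adapts the new three-argument matchPotentialSourceFile to the two-argument callback copyDir expects by capturing the go version in a closure. A self-contained sketch of the same adaptation; the 1.17 gate is approximated with golang.org/x/mod/semver, since gover is internal to cmd/go:

package main

import (
	"fmt"
	"io/fs"
	"os"

	"golang.org/x/mod/semver"
)

// Three-argument matcher, mirroring the new matchPotentialSourceFile shape.
func matchSource(dir string, info fs.DirEntry, goVersion string) bool {
	if info.Name() == "go.mod" || info.Name() == "go.sum" {
		// Keep these files only for modules below go 1.17 (sketch of the gate).
		return semver.Compare("v"+goVersion, "v1.17") < 0
	}
	return true
}

func main() {
	goVersion := "1.22"
	// Bind goVersion so the closure fits a func(string, fs.DirEntry) bool
	// callback, as vendorPkg does above.
	matcher := func(dir string, info fs.DirEntry) bool {
		return matchSource(dir, info, goVersion)
	}
	entries, err := os.ReadDir(".")
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, e := range entries {
		fmt.Println(e.Name(), matcher(".", e))
	}
}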
return nil } - if modload.MainModules.Contains(mod.Path) { + if loaderstate.MainModules.Contains(mod.Path) { return nil } var errs []error diff --git a/src/cmd/go/internal/modcmd/why.go b/src/cmd/go/internal/modcmd/why.go index 198672d8064..b52b9354c29 100644 --- a/src/cmd/go/internal/modcmd/why.go +++ b/src/cmd/go/internal/modcmd/why.go @@ -63,9 +63,10 @@ func init() { } func runWhy(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() - modload.ForceUseModules = true - modload.RootMode = modload.NeedRoot + moduleLoaderState := modload.NewState() + moduleLoaderState.InitWorkfile() + moduleLoaderState.ForceUseModules = true + moduleLoaderState.RootMode = modload.NeedRoot modload.ExplicitWriteGoMod = true // don't write go.mod in ListModules loadOpts := modload.PackageOpts{ @@ -83,13 +84,13 @@ func runWhy(ctx context.Context, cmd *base.Command, args []string) { } } - mods, err := modload.ListModules(ctx, args, 0, "") + mods, err := modload.ListModules(moduleLoaderState, ctx, args, 0, "") if err != nil { base.Fatal(err) } byModule := make(map[string][]string) - _, pkgs := modload.LoadPackages(ctx, loadOpts, "all") + _, pkgs := modload.LoadPackages(moduleLoaderState, ctx, loadOpts, "all") for _, path := range pkgs { m := modload.PackageModule(path) if m.Path != "" { @@ -120,9 +121,9 @@ func runWhy(ctx context.Context, cmd *base.Command, args []string) { } } else { // Resolve to packages. - matches, _ := modload.LoadPackages(ctx, loadOpts, args...) + matches, _ := modload.LoadPackages(moduleLoaderState, ctx, loadOpts, args...) - modload.LoadPackages(ctx, loadOpts, "all") // rebuild graph, from main module (not from named packages) + modload.LoadPackages(moduleLoaderState, ctx, loadOpts, "all") // rebuild graph, from main module (not from named packages) sep := "" for _, m := range matches { diff --git a/src/cmd/go/internal/modfetch/cache.go b/src/cmd/go/internal/modfetch/cache.go index 9c34581a910..30020d24a71 100644 --- a/src/cmd/go/internal/modfetch/cache.go +++ b/src/cmd/go/internal/modfetch/cache.go @@ -622,10 +622,11 @@ func writeDiskStat(ctx context.Context, file string, info *RevInfo) error { o := *info.Origin info.Origin = &o - // Tags never matter if you are starting with a semver version, + // Tags and RepoSum never matter if you are starting with a semver version, // as we would be when finding this cache entry. o.TagSum = "" o.TagPrefix = "" + o.RepoSum = "" // Ref doesn't matter if you have a pseudoversion. if module.IsPseudoVersion(info.Version) { o.Ref = "" diff --git a/src/cmd/go/internal/modfetch/codehost/codehost.go b/src/cmd/go/internal/modfetch/codehost/codehost.go index edb87e40891..08b1216d6bf 100644 --- a/src/cmd/go/internal/modfetch/codehost/codehost.go +++ b/src/cmd/go/internal/modfetch/codehost/codehost.go @@ -116,9 +116,12 @@ type Origin struct { Ref string `json:",omitempty"` // If RepoSum is non-empty, then the resolution of this module version - // failed due to the repo being available but the version not being present. - // This depends on the entire state of the repo, which RepoSum summarizes. - // For Git, this is a hash of all the refs and their hashes. + // depends on the entire state of the repo, which RepoSum summarizes. + // For Git, this is a hash of all the refs and their hashes, and the RepoSum + // is only needed for module versions that don't exist. 
+	// For Mercurial, this is a hash of all the branches and their heads' hashes,
+	// since the set of available tags is derived from .hgtags files in those branches,
+	// and the RepoSum is used for all module versions, available and not.
 	RepoSum string `json:",omitempty"`
 }
diff --git a/src/cmd/go/internal/modfetch/codehost/git.go b/src/cmd/go/internal/modfetch/codehost/git.go
index 74c4c646cdc..b615fc0f13b 100644
--- a/src/cmd/go/internal/modfetch/codehost/git.go
+++ b/src/cmd/go/internal/modfetch/codehost/git.go
@@ -794,15 +794,18 @@ func (r *gitRepo) RecentTag(ctx context.Context, rev, prefix string, allowed fun
 	// There are plausible tags, but we don't know if rev is a descendent of any of them.
 	// Fetch the history to find out.
 
+	// Note: do not use defer unlock, because describe calls allowed,
+	// which uses retracted, which calls ReadFile, which may end up
+	// back at a method that acquires r.mu.
 	unlock, err := r.mu.Lock()
 	if err != nil {
 		return "", err
 	}
-	defer unlock()
-
 	if err := r.fetchRefsLocked(ctx); err != nil {
+		unlock()
 		return "", err
 	}
+	unlock()
 
 	// If we've reached this point, we have all of the commits that are reachable
 	// from all heads and tags.
diff --git a/src/cmd/go/internal/modfetch/codehost/git_test.go b/src/cmd/go/internal/modfetch/codehost/git_test.go
index e8884abdfed..e032a14e124 100644
--- a/src/cmd/go/internal/modfetch/codehost/git_test.go
+++ b/src/cmd/go/internal/modfetch/codehost/git_test.go
@@ -16,14 +16,18 @@ import (
 	"io/fs"
 	"log"
 	"os"
+	"os/exec"
 	"path"
 	"path/filepath"
 	"reflect"
+	"regexp"
 	"runtime"
 	"strings"
 	"sync"
 	"testing"
 	"time"
+
+	"golang.org/x/mod/semver"
 )
 
 func TestMain(m *testing.M) {
@@ -192,9 +196,29 @@ func testRepo(ctx context.Context, t *testing.T, remote string) (Repo, error) {
 	return NewRepo(ctx, vcsName, remote, false)
 }
 
+var gitVersLineExtract = regexp.MustCompile(`git version\s+([\d.]+)`)
+
+func gitVersion(t testing.TB) string {
+	gitOut, runErr := exec.Command("git", "version").CombinedOutput()
+	if runErr != nil {
+		t.Logf("failed to execute git version: %s", runErr)
+		return "v0"
+	}
+	matches := gitVersLineExtract.FindSubmatch(gitOut)
+	if len(matches) < 2 {
+		t.Logf("git version extraction regexp did not match version line: %q", gitOut)
+		return "v0"
+	}
+	return "v" + string(matches[1])
+}
+
+const minGitSHA256Vers = "v2.29"
+
 func TestTags(t *testing.T) {
 	t.Parallel()
 
+	gitVers := gitVersion(t)
+
 	type tagsTest struct {
 		repo   string
 		prefix string
@@ -204,6 +228,9 @@
 	runTest := func(tt tagsTest) func(*testing.T) {
 		return func(t *testing.T) {
 			t.Parallel()
+			if tt.repo == gitsha256repo && semver.Compare(gitVers, minGitSHA256Vers) < 0 {
+				t.Skipf("git version is too old (%+v); skipping git sha256 test", gitVers)
+			}
 
 			ctx := testContext(t)
 			r, err := testRepo(ctx, t, tt.repo)
@@ -288,6 +315,8 @@
 func TestLatest(t *testing.T) {
 	t.Parallel()
 
+	gitVers := gitVersion(t)
+
 	type latestTest struct {
 		repo string
 		info *RevInfo
@@ -297,6 +326,10 @@
 			t.Parallel()
 			ctx := testContext(t)
 
+			if tt.repo == gitsha256repo && semver.Compare(gitVers, minGitSHA256Vers) < 0 {
+				t.Skipf("git version is too old (%+v); skipping git sha256 test", gitVers)
+			}
+
 			r, err := testRepo(ctx, t, tt.repo)
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -350,12 +383,13 @@
 			Origin: &Origin{
 				VCS:  "hg",
 				URL:  hgrepo1,
-				Hash: "18518c07eb8ed5c80221e997e518cccaa8c0c287",
+				Ref:  "tip",
+				Hash: "745aacc8b24decc44ac2b13870f5472b479f4d72",
 			},
-			Name:
"18518c07eb8ed5c80221e997e518cccaa8c0c287", - Short: "18518c07eb8e", - Version: "18518c07eb8ed5c80221e997e518cccaa8c0c287", - Time: time.Date(2018, 6, 27, 16, 16, 30, 0, time.UTC), + Name: "745aacc8b24decc44ac2b13870f5472b479f4d72", + Short: "745aacc8b24d", + Version: "745aacc8b24decc44ac2b13870f5472b479f4d72", + Time: time.Date(2018, 6, 27, 16, 16, 10, 0, time.UTC), }, }, } { @@ -375,6 +409,8 @@ func TestLatest(t *testing.T) { func TestReadFile(t *testing.T) { t.Parallel() + gitVers := gitVersion(t) + type readFileTest struct { repo string rev string @@ -387,6 +423,10 @@ func TestReadFile(t *testing.T) { t.Parallel() ctx := testContext(t) + if tt.repo == gitsha256repo && semver.Compare(gitVers, minGitSHA256Vers) < 0 { + t.Skipf("git version is too old (%+v); skipping git sha256 test", gitVers) + } + r, err := testRepo(ctx, t, tt.repo) if err != nil { t.Fatal(err) @@ -468,6 +508,8 @@ type zipFile struct { func TestReadZip(t *testing.T) { t.Parallel() + gitVers := gitVersion(t) + type readZipTest struct { repo string rev string @@ -480,6 +522,10 @@ func TestReadZip(t *testing.T) { t.Parallel() ctx := testContext(t) + if tt.repo == gitsha256repo && semver.Compare(gitVers, minGitSHA256Vers) < 0 { + t.Skipf("git version is too old (%+v); skipping git sha256 test", gitVers) + } + r, err := testRepo(ctx, t, tt.repo) if err != nil { t.Fatal(err) @@ -630,7 +676,6 @@ func TestReadZip(t *testing.T) { subdir: "", files: map[string]uint64{ "prefix/.hg_archival.txt": ^uint64(0), - "prefix/.hgtags": 405, "prefix/v3/sub/dir/file.txt": 16, "prefix/README": 0, }, @@ -743,16 +788,18 @@ func TestReadZip(t *testing.T) { } var hgmap = map[string]string{ - "HEAD": "41964ddce1180313bdc01d0a39a2813344d6261d", // not tip due to bad hgrepo1 conversion - "9d02800338b8a55be062c838d1f02e0c5780b9eb": "8f49ee7a6ddcdec6f0112d9dca48d4a2e4c3c09e", - "76a00fb249b7f93091bc2c89a789dab1fc1bc26f": "88fde824ec8b41a76baa16b7e84212cee9f3edd0", - "ede458df7cd0fdca520df19a33158086a8a68e81": "41964ddce1180313bdc01d0a39a2813344d6261d", - "97f6aa59c81c623494825b43d39e445566e429a4": "c0cbbfb24c7c3c50c35c7b88e7db777da4ff625d", + "HEAD": "c0186fb00e50985709b12266419f50bf11860166", + "9d02800338b8a55be062c838d1f02e0c5780b9eb": "b1ed98abc2683d326f89b924875bf14bd584898e", // v2.0.2, v2 + "76a00fb249b7f93091bc2c89a789dab1fc1bc26f": "a546811101e11d6aff2ac72072d2d439b3a88f33", // v2.3, v2.0.1 + "ede458df7cd0fdca520df19a33158086a8a68e81": "c0186fb00e50985709b12266419f50bf11860166", // v1.2.3, v1.2.4-annotated + "97f6aa59c81c623494825b43d39e445566e429a4": "c1638e3673b121d9c83e92166fce2a25dcadd6cb", // foo.txt commit on v2.3.4 branch } func TestStat(t *testing.T) { t.Parallel() + gitVers := gitVersion(t) + type statTest struct { repo string rev string @@ -764,6 +811,10 @@ func TestStat(t *testing.T) { t.Parallel() ctx := testContext(t) + if tt.repo == gitsha256repo && semver.Compare(gitVers, minGitSHA256Vers) < 0 { + t.Skipf("git version is too old (%+v); skipping git sha256 test", gitVers) + } + r, err := testRepo(ctx, t, tt.repo) if err != nil { t.Fatal(err) diff --git a/src/cmd/go/internal/modfetch/codehost/vcs.go b/src/cmd/go/internal/modfetch/codehost/vcs.go index d80397502b4..aae1a60bfac 100644 --- a/src/cmd/go/internal/modfetch/codehost/vcs.go +++ b/src/cmd/go/internal/modfetch/codehost/vcs.go @@ -17,12 +17,16 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" "cmd/go/internal/base" + "cmd/go/internal/cfg" "cmd/go/internal/lockedfile" "cmd/go/internal/str" "cmd/internal/par" + + "golang.org/x/mod/semver" ) // A VCSError indicates 
an error using a version control system.
@@ -80,6 +84,10 @@ type vcsRepo struct {
 	fetchOnce sync.Once
 	fetchErr  error
 
+	fetched atomic.Bool
+
+	repoSumOnce sync.Once
+	repoSum     string
 }
 
 func newVCSRepo(ctx context.Context, vcs, remote string, local bool) (Repo, error) {
@@ -129,6 +137,9 @@ func newVCSRepo(ctx context.Context, vcs, remote string, local bool) (Repo, erro
 		return nil, err
 	}
 	_, err = Run(ctx, r.dir, cmd.init(r.remote))
+	if err == nil && cmd.postInit != nil {
+		err = cmd.postInit(ctx, r)
+	}
 	release()
 
 	if err != nil {
@@ -142,20 +153,29 @@ func newVCSRepo(ctx context.Context, vcs, remote string, local bool) (Repo, erro
 const vcsWorkDirType = "vcs1."
 
 type vcsCmd struct {
-	vcs           string                                            // vcs name "hg"
-	init          func(remote string) []string                      // cmd to init repo to track remote
-	tags          func(remote string) []string                      // cmd to list local tags
-	tagRE         *lazyregexp.Regexp                                // regexp to extract tag names from output of tags cmd
-	branches      func(remote string) []string                      // cmd to list local branches
-	branchRE      *lazyregexp.Regexp                                // regexp to extract branch names from output of tags cmd
-	badLocalRevRE *lazyregexp.Regexp                                // regexp of names that must not be served out of local cache without doing fetch first
-	statLocal     func(rev, remote string) []string                 // cmd to stat local rev
-	parseStat     func(rev, out string) (*RevInfo, error)           // cmd to parse output of statLocal
-	fetch         []string                                          // cmd to fetch everything from remote
-	latest        string                                            // name of latest commit on remote (tip, HEAD, etc)
-	readFile      func(rev, file, remote string) []string           // cmd to read rev's file
-	readZip       func(rev, subdir, remote, target string) []string // cmd to read rev's subdir as zip file
-	doReadZip     func(ctx context.Context, dst io.Writer, workDir, rev, subdir, remote string) error // arbitrary function to read rev's subdir as zip file
+	vcs                string                                             // vcs name "hg"
+	init               func(remote string) []string                       // cmd to init repo to track remote
+	postInit           func(context.Context, *vcsRepo) error              // func to init repo after .init runs
+	repoSum            func(remote string) []string                       // cmd to calculate reposum of remote repo
+	lookupRef          func(remote, ref string) []string                  // cmd to look up ref in remote repo
+	tags               func(remote string) []string                       // cmd to list local tags
+	tagsNeedsFetch     bool                                               // run fetch before tags
+	tagRE              *lazyregexp.Regexp                                 // regexp to extract tag names from output of tags cmd
+	branches           func(remote string) []string                       // cmd to list local branches
+	branchesNeedsFetch bool                                               // run fetch before branches
+	branchRE           *lazyregexp.Regexp                                 // regexp to extract branch names from output of tags cmd
+	badLocalRevRE      *lazyregexp.Regexp                                 // regexp of names that must not be served out of local cache without doing fetch first
+	statLocal          func(rev, remote string) []string                  // cmd to stat local rev
+	parseStat          func(rev, out string) (*RevInfo, error)            // func to parse output of statLocal
+	fetch              []string                                           // cmd to fetch everything from remote
+	latest             string                                             // name of latest commit on remote (tip, HEAD, etc)
+	descendsFrom       func(rev, tag string) []string                     // cmd to check whether rev descends from tag
+	recentTags         func(rev string) []string                          // cmd to print tag ancestors of rev
+	readFile           func(rev, file, remote string) []string            // cmd to read rev's file
+	readZip            func(rev, subdir, remote, target string) []string  // cmd to read rev's subdir as zip file
+
+	// arbitrary function to read rev's subdir as zip file
+	doReadZip func(ctx context.Context, dst io.Writer, workDir, rev, subdir, remote string) error
 }
 
 var re =
lazyregexp.New var vcsCmds = map[string]*vcsCmd{ "hg": { vcs: "hg", - init: func(remote string) []string { - return []string{"hg", "clone", "-U", "--", remote, "."} + repoSum: func(remote string) []string { + return []string{ + "hg", + "--config=extensions.goreposum=" + filepath.Join(cfg.GOROOT, "lib/hg/goreposum.py"), + "goreposum", + remote, + } }, + lookupRef: func(remote, ref string) []string { + return []string{ + "hg", + "--config=extensions.goreposum=" + filepath.Join(cfg.GOROOT, "lib/hg/goreposum.py"), + "golookup", + remote, + ref, + } + }, + init: func(remote string) []string { + return []string{"hg", "init", "."} + }, + postInit: hgAddRemote, tags: func(remote string) []string { return []string{"hg", "tags", "-q"} }, - tagRE: re(`(?m)^[^\n]+$`), + tagsNeedsFetch: true, + tagRE: re(`(?m)^[^\n]+$`), branches: func(remote string) []string { return []string{"hg", "branches", "-c", "-q"} }, - branchRE: re(`(?m)^[^\n]+$`), - badLocalRevRE: re(`(?m)^(tip)$`), + branchesNeedsFetch: true, + branchRE: re(`(?m)^[^\n]+$`), + badLocalRevRE: re(`(?m)^(tip)$`), statLocal: func(rev, remote string) []string { return []string{"hg", "log", "-l1", "-r", rev, "--template", "{node} {date|hgdate} {tags}"} }, parseStat: hgParseStat, fetch: []string{"hg", "pull", "-f"}, latest: "tip", + descendsFrom: func(rev, tag string) []string { + return []string{"hg", "log", "-r", "ancestors(" + rev + ") and " + tag} + }, + recentTags: func(rev string) []string { + return []string{"hg", "log", "-r", "ancestors(" + rev + ") and tag()", "--template", "{tags}\n"} + }, readFile: func(rev, file, remote string) []string { return []string{"hg", "cat", "-r", rev, file} }, @@ -276,6 +322,10 @@ var vcsCmds = map[string]*vcsCmd{ } func (r *vcsRepo) loadTags(ctx context.Context) { + if r.cmd.tagsNeedsFetch { + r.fetchOnce.Do(func() { r.fetch(ctx) }) + } + out, err := Run(ctx, r.dir, r.cmd.tags(r.remote)) if err != nil { return @@ -296,6 +346,10 @@ func (r *vcsRepo) loadBranches(ctx context.Context) { return } + if r.cmd.branchesNeedsFetch { + r.fetchOnce.Do(func() { r.fetch(ctx) }) + } + out, err := Run(ctx, r.dir, r.cmd.branches(r.remote)) if err != nil { return @@ -310,7 +364,84 @@ func (r *vcsRepo) loadBranches(ctx context.Context) { } } +func (r *vcsRepo) loadRepoSum(ctx context.Context) { + if r.cmd.repoSum == nil { + return + } + where := r.remote + if r.fetched.Load() { + where = "." // use local repo + } + out, err := Run(ctx, r.dir, r.cmd.repoSum(where)) + if err != nil { + return + } + r.repoSum = strings.TrimSpace(string(out)) +} + +func (r *vcsRepo) lookupRef(ctx context.Context, ref string) (string, error) { + if r.cmd.lookupRef == nil { + return "", fmt.Errorf("no lookupRef") + } + out, err := Run(ctx, r.dir, r.cmd.lookupRef(r.remote, ref)) + if err != nil { + return "", err + } + return strings.TrimSpace(string(out)), nil +} + +// repoSumOrigin returns an Origin containing a RepoSum. 
+func (r *vcsRepo) repoSumOrigin(ctx context.Context) *Origin { + origin := &Origin{ + VCS: r.cmd.vcs, + URL: r.remote, + RepoSum: r.repoSum, + } + r.repoSumOnce.Do(func() { r.loadRepoSum(ctx) }) + origin.RepoSum = r.repoSum + return origin +} + func (r *vcsRepo) CheckReuse(ctx context.Context, old *Origin, subdir string) error { + if old == nil { + return fmt.Errorf("missing origin") + } + if old.VCS != r.cmd.vcs || old.URL != r.remote { + return fmt.Errorf("origin moved from %v %q to %v %q", old.VCS, old.URL, r.cmd.vcs, r.remote) + } + if old.Subdir != subdir { + return fmt.Errorf("origin moved from %v %q %q to %v %q %q", old.VCS, old.URL, old.Subdir, r.cmd.vcs, r.remote, subdir) + } + + if old.Ref == "" && old.RepoSum == "" && old.Hash != "" { + // Hash has to remain in repo. + hash, err := r.lookupRef(ctx, old.Hash) + if err == nil && hash == old.Hash { + return nil + } + if err != nil { + return fmt.Errorf("looking up hash: %v", err) + } + return fmt.Errorf("hash changed") // weird but maybe they made a tag + } + + if old.Ref != "" && old.RepoSum == "" { + hash, err := r.lookupRef(ctx, old.Ref) + if err == nil && hash != "" && hash == old.Hash { + return nil + } + } + + r.repoSumOnce.Do(func() { r.loadRepoSum(ctx) }) + if r.repoSum != "" { + if old.RepoSum == "" { + return fmt.Errorf("non-specific origin") + } + if old.RepoSum != r.repoSum { + return fmt.Errorf("repo changed") + } + return nil + } return fmt.Errorf("vcs %s: CheckReuse: %w", r.cmd.vcs, errors.ErrUnsupported) } @@ -323,14 +454,8 @@ func (r *vcsRepo) Tags(ctx context.Context, prefix string) (*Tags, error) { r.tagsOnce.Do(func() { r.loadTags(ctx) }) tags := &Tags{ - // None of the other VCS provide a reasonable way to compute TagSum - // without downloading the whole repo, so we only include VCS and URL - // in the Origin. - Origin: &Origin{ - VCS: r.cmd.vcs, - URL: r.remote, - }, - List: []Tag{}, + Origin: r.repoSumOrigin(ctx), + List: []Tag{}, } for tag := range r.tags { if strings.HasPrefix(tag, prefix) { @@ -372,7 +497,7 @@ func (r *vcsRepo) Stat(ctx context.Context, rev string) (*RevInfo, error) { } info, err := r.statLocal(ctx, rev) if err != nil { - return nil, err + return info, err } if !revOK { info.Version = info.Name @@ -389,13 +514,15 @@ func (r *vcsRepo) fetch(ctx context.Context) { } _, r.fetchErr = Run(ctx, r.dir, r.cmd.fetch) release() + r.fetched.Store(true) } } func (r *vcsRepo) statLocal(ctx context.Context, rev string) (*RevInfo, error) { out, err := Run(ctx, r.dir, r.cmd.statLocal(rev, r.remote)) if err != nil { - return nil, &UnknownRevisionError{Rev: rev} + info := &RevInfo{Origin: r.repoSumOrigin(ctx)} + return info, &UnknownRevisionError{Rev: rev} } info, err := r.cmd.parseStat(rev, string(out)) if err != nil { @@ -406,6 +533,10 @@ func (r *vcsRepo) statLocal(ctx context.Context, rev string) (*RevInfo, error) { } info.Origin.VCS = r.cmd.vcs info.Origin.URL = r.remote + info.Origin.Ref = rev + if strings.HasPrefix(info.Name, rev) && len(rev) >= 12 { + info.Origin.Ref = "" // duplicates Hash + } return info, nil } @@ -437,16 +568,37 @@ func (r *vcsRepo) ReadFile(ctx context.Context, rev, file string, maxSize int64) } func (r *vcsRepo) RecentTag(ctx context.Context, rev, prefix string, allowed func(string) bool) (tag string, err error) { - // We don't technically need to lock here since we're returning an error - // unconditionally, but doing so anyway will help to avoid baking in - // lock-inversion bugs. + // Only lock for the subprocess execution, not for the tag scan. 
+ // allowed may call other methods that acquire the lock. unlock, err := r.mu.Lock() if err != nil { return "", err } - defer unlock() - return "", vcsErrorf("vcs %s: RecentTag: %w", r.cmd.vcs, errors.ErrUnsupported) + if r.cmd.recentTags == nil { + unlock() + return "", vcsErrorf("vcs %s: RecentTag: %w", r.cmd.vcs, errors.ErrUnsupported) + } + out, err := Run(ctx, r.dir, r.cmd.recentTags(rev)) + unlock() + if err != nil { + return "", err + } + + highest := "" + for _, tag := range strings.Fields(string(out)) { + if !strings.HasPrefix(tag, prefix) || !allowed(tag) { + continue + } + semtag := tag[len(prefix):] + if semver.Compare(semtag, highest) > 0 { + highest = semtag + } + } + if highest != "" { + return prefix + highest, nil + } + return "", nil } func (r *vcsRepo) DescendsFrom(ctx context.Context, rev, tag string) (bool, error) { @@ -456,7 +608,15 @@ func (r *vcsRepo) DescendsFrom(ctx context.Context, rev, tag string) (bool, erro } defer unlock() - return false, vcsErrorf("vcs %s: DescendsFrom: %w", r.cmd.vcs, errors.ErrUnsupported) + if r.cmd.descendsFrom == nil { + return false, vcsErrorf("vcs %s: DescendsFrom: %w", r.cmd.vcs, errors.ErrUnsupported) + } + + out, err := Run(ctx, r.dir, r.cmd.descendsFrom(rev, tag)) + if err != nil { + return false, err + } + return strings.TrimSpace(string(out)) != "", nil } func (r *vcsRepo) ReadZip(ctx context.Context, rev, subdir string, maxSize int64) (zip io.ReadCloser, err error) { @@ -464,15 +624,20 @@ func (r *vcsRepo) ReadZip(ctx context.Context, rev, subdir string, maxSize int64 return nil, vcsErrorf("vcs %s: ReadZip: %w", r.cmd.vcs, errors.ErrUnsupported) } + if rev == "latest" { + rev = r.cmd.latest + } + _, err = r.Stat(ctx, rev) // download rev into local repo + if err != nil { + return nil, err + } + unlock, err := r.mu.Lock() if err != nil { return nil, err } defer unlock() - if rev == "latest" { - rev = r.cmd.latest - } f, err := os.CreateTemp("", "go-readzip-*.zip") if err != nil { return nil, err @@ -521,6 +686,11 @@ func (d *deleteCloser) Close() error { return d.File.Close() } +func hgAddRemote(ctx context.Context, r *vcsRepo) error { + // Write .hg/hgrc with remote URL in it. 
+ return os.WriteFile(filepath.Join(r.dir, ".hg/hgrc"), []byte(fmt.Sprintf("[paths]\ndefault = %s\n", r.remote)), 0666) +} + func hgParseStat(rev, out string) (*RevInfo, error) { f := strings.Fields(out) if len(f) < 3 { @@ -545,9 +715,7 @@ func hgParseStat(rev, out string) (*RevInfo, error) { sort.Strings(tags) info := &RevInfo{ - Origin: &Origin{ - Hash: hash, - }, + Origin: &Origin{Hash: hash}, Name: hash, Short: ShortenSHA1(hash), Time: time.Unix(t, 0).UTC(), @@ -630,9 +798,7 @@ func fossilParseStat(rev, out string) (*RevInfo, error) { version = hash // extend to full hash } info := &RevInfo{ - Origin: &Origin{ - Hash: hash, - }, + Origin: &Origin{Hash: hash}, Name: hash, Short: ShortenSHA1(hash), Time: t, diff --git a/src/cmd/go/internal/modfetch/coderepo.go b/src/cmd/go/internal/modfetch/coderepo.go index 3df469d2852..7cec96a3070 100644 --- a/src/cmd/go/internal/modfetch/coderepo.go +++ b/src/cmd/go/internal/modfetch/coderepo.go @@ -384,6 +384,9 @@ func (r *codeRepo) convert(ctx context.Context, info *codehost.RevInfo, statVers } else { origin.TagPrefix = tags.Origin.TagPrefix origin.TagSum = tags.Origin.TagSum + if tags.Origin.RepoSum != "" { + origin.RepoSum = tags.Origin.RepoSum + } } } }() diff --git a/src/cmd/go/internal/modfetch/fetch.go b/src/cmd/go/internal/modfetch/fetch.go index 5d310ccbba9..0a84aecd426 100644 --- a/src/cmd/go/internal/modfetch/fetch.go +++ b/src/cmd/go/internal/modfetch/fetch.go @@ -35,13 +35,6 @@ import ( modzip "golang.org/x/mod/zip" ) -// The downloadCache is used to cache the operation of downloading a module to disk -// (if it's not already downloaded) and getting the directory it was downloaded to. -// It is important that downloadCache must not be accessed by any of the exported -// functions of this package after they return, because it can be modified by the -// non-thread-safe SetState function -var downloadCache = new(par.ErrCache[module.Version, string]) // version → directory; - var ErrToolchain = errors.New("internal error: invalid operation on toolchain module") // Download downloads the specific module version to the @@ -56,7 +49,7 @@ func Download(ctx context.Context, mod module.Version) (dir string, err error) { } // The par.Cache here avoids duplicate work. - return downloadCache.Do(mod, func() (string, error) { + return ModuleFetchState.downloadCache.Do(mod, func() (string, error) { dir, err := download(ctx, mod) if err != nil { return "", err @@ -85,7 +78,7 @@ func Unzip(ctx context.Context, mod module.Version, zipfile string) (dir string, base.Fatal(err) } - return downloadCache.Do(mod, func() (string, error) { + return ModuleFetchState.downloadCache.Do(mod, func() (string, error) { ctx, span := trace.StartSpan(ctx, "modfetch.Unzip "+mod.String()) defer span.Done() @@ -214,9 +207,12 @@ func DownloadZip(ctx context.Context, mod module.Version) (zipfile string, err e } ziphashfile := zipfile + "hash" - // Return without locking if the zip and ziphash files exist. + // Return early if the zip and ziphash files exist. if _, err := os.Stat(zipfile); err == nil { if _, err := os.Stat(ziphashfile); err == nil { + if !HaveSum(mod) { + checkMod(ctx, mod) + } return zipfile, nil } } @@ -441,9 +437,6 @@ func RemoveAll(dir string) error { // accessed by any of the exported functions of this package after they return, because // they can be modified by the non-thread-safe SetState function. 
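Aside: hgParseStat above consumes output produced by the statLocal template "{node} {date|hgdate} {tags}". A hedged, self-contained sketch of that parse, with the field layout assumed from the template (node, Unix seconds, timezone offset, then tags):

package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

func parseHgStat(out string) (hash string, when time.Time, tags []string, err error) {
	f := strings.Fields(out)
	if len(f) < 3 {
		return "", time.Time{}, nil, fmt.Errorf("unexpected response from hg log: %q", out)
	}
	hash = f[0]
	secs, err := strconv.ParseInt(f[1], 10, 64) // f[2] is the timezone offset
	if err != nil {
		return "", time.Time{}, nil, err
	}
	for _, tag := range f[3:] {
		if tag != "tip" { // "tip" is hg's moving head marker, not a real tag
			tags = append(tags, tag)
		}
	}
	return hash, time.Unix(secs, 0).UTC(), tags, nil
}

func main() {
	hash, when, tags, err := parseHgStat(
		"745aacc8b24decc44ac2b13870f5472b479f4d72 1530116170 0 v1.2.3 tip")
	fmt.Println(hash, when, tags, err)
}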
-var GoSumFile string             // path to go.sum; set by package modload
-var WorkspaceGoSumFiles []string // path to module go.sums in workspace; set by package modload
 type modSum struct {
 	mod module.Version
 	sum string
 }
@@ -468,11 +461,31 @@ type modSumStatus struct {
 
 // State holds a snapshot of the global state of the modfetch package.
 type State struct {
-	goSumFile           string
-	workspaceGoSumFiles []string
-	lookupCache         *par.Cache[lookupCacheKey, Repo]
-	downloadCache       *par.ErrCache[module.Version, string]
-	sumState            sumState
+	// path to go.sum; set by package modload
+	GoSumFile string
+	// path to module go.sums in workspace; set by package modload
+	WorkspaceGoSumFiles []string
+	// The lookupCache is used to cache the work done by Lookup.
+	// It is important that the global functions of this package that access it
+	// do not do so after they return.
+	lookupCache *par.Cache[lookupCacheKey, Repo]
+	// The downloadCache is used to cache the operation of downloading a module to disk
+	// (if it's not already downloaded) and getting the directory it was downloaded to.
+	// It is important that downloadCache must not be accessed by any of the exported
+	// functions of this package after they return, because it can be modified by the
+	// non-thread-safe SetState function.
+	downloadCache *par.ErrCache[module.Version, string] // version → directory
+
+	sumState sumState
+}
+
+var ModuleFetchState *State = NewState()
+
+func NewState() *State {
+	s := new(State)
+	s.lookupCache = new(par.Cache[lookupCacheKey, Repo])
+	s.downloadCache = new(par.ErrCache[module.Version, string])
+	return s
+}
 
 // Reset resets globals in the modfetch package, so previous loads don't affect
@@ -497,20 +510,20 @@ func SetState(newState State) (oldState State) {
 	defer goSum.mu.Unlock()
 
 	oldState = State{
-		goSumFile:           GoSumFile,
-		workspaceGoSumFiles: WorkspaceGoSumFiles,
-		lookupCache:         lookupCache,
-		downloadCache:       downloadCache,
+		GoSumFile:           ModuleFetchState.GoSumFile,
+		WorkspaceGoSumFiles: ModuleFetchState.WorkspaceGoSumFiles,
+		lookupCache:         ModuleFetchState.lookupCache,
+		downloadCache:       ModuleFetchState.downloadCache,
 		sumState:            goSum.sumState,
 	}
 
-	GoSumFile = newState.goSumFile
-	WorkspaceGoSumFiles = newState.workspaceGoSumFiles
+	ModuleFetchState.GoSumFile = newState.GoSumFile
+	ModuleFetchState.WorkspaceGoSumFiles = newState.WorkspaceGoSumFiles
 	// Uses of lookupCache and downloadCache both can call checkModSum,
 	// which in turn sets the used bit on goSum.status for modules.
 	// Set (or reset) them so used can be computed properly.
-	lookupCache = newState.lookupCache
-	downloadCache = newState.downloadCache
+	ModuleFetchState.lookupCache = newState.lookupCache
+	ModuleFetchState.downloadCache = newState.downloadCache
 
 	// Set, or reset, all fields on goSum. If being reset to empty, it will be initialized later.
 	goSum.sumState = newState.sumState
@@ -522,7 +535,7 @@
 // use of go.sum is now enabled.
 // The goSum lock must be held.
func initGoSum() (bool, error) { - if GoSumFile == "" { + if ModuleFetchState.GoSumFile == "" { return false, nil } if goSum.m != nil { @@ -533,7 +546,7 @@ func initGoSum() (bool, error) { goSum.status = make(map[modSum]modSumStatus) goSum.w = make(map[string]map[module.Version][]string) - for _, f := range WorkspaceGoSumFiles { + for _, f := range ModuleFetchState.WorkspaceGoSumFiles { goSum.w[f] = make(map[module.Version][]string) _, err := readGoSumFile(goSum.w[f], f) if err != nil { @@ -541,7 +554,7 @@ func initGoSum() (bool, error) { } } - enabled, err := readGoSumFile(goSum.m, GoSumFile) + enabled, err := readGoSumFile(goSum.m, ModuleFetchState.GoSumFile) goSum.enabled = enabled return enabled, err } @@ -787,7 +800,7 @@ func checkModSum(mod module.Version, h string) error { // goSum.mu must be locked. func haveModSumLocked(mod module.Version, h string) bool { sumFileName := "go.sum" - if strings.HasSuffix(GoSumFile, "go.work.sum") { + if strings.HasSuffix(ModuleFetchState.GoSumFile, "go.work.sum") { sumFileName = "go.work.sum" } for _, vh := range goSum.m[mod] { @@ -931,7 +944,7 @@ Outer: if readonly { return ErrGoSumDirty } - if fsys.Replaced(GoSumFile) { + if fsys.Replaced(ModuleFetchState.GoSumFile) { base.Fatalf("go: updates to go.sum needed, but go.sum is part of the overlay specified with -overlay") } @@ -941,7 +954,7 @@ Outer: defer unlock() } - err := lockedfile.Transform(GoSumFile, func(data []byte) ([]byte, error) { + err := lockedfile.Transform(ModuleFetchState.GoSumFile, func(data []byte) ([]byte, error) { tidyGoSum := tidyGoSum(data, keep) return tidyGoSum, nil }) @@ -960,7 +973,7 @@ Outer: func TidyGoSum(keep map[module.Version]bool) (before, after []byte) { goSum.mu.Lock() defer goSum.mu.Unlock() - before, err := lockedfile.Read(GoSumFile) + before, err := lockedfile.Read(ModuleFetchState.GoSumFile) if err != nil && !errors.Is(err, fs.ErrNotExist) { base.Fatalf("reading go.sum: %v", err) } @@ -977,7 +990,7 @@ func tidyGoSum(data []byte, keep map[module.Version]bool) []byte { // truncated the file to remove erroneous hashes, and we shouldn't restore // them without good reason. goSum.m = make(map[module.Version][]string, len(goSum.m)) - readGoSum(goSum.m, GoSumFile, data) + readGoSum(goSum.m, ModuleFetchState.GoSumFile, data) for ms, st := range goSum.status { if st.used && !sumInWorkspaceModulesLocked(ms.mod) { addModSumLocked(ms.mod, ms.sum) diff --git a/src/cmd/go/internal/modfetch/repo.go b/src/cmd/go/internal/modfetch/repo.go index 5d4d679e832..bb5dfc4655d 100644 --- a/src/cmd/go/internal/modfetch/repo.go +++ b/src/cmd/go/internal/modfetch/repo.go @@ -184,11 +184,6 @@ type RevInfo struct { // To avoid version control access except when absolutely necessary, // Lookup does not attempt to connect to the repository itself. -// The Lookup cache is used cache the work done by Lookup. -// It is important that the global functions of this package that access it do not -// do so after they return. 
-var lookupCache = new(par.Cache[lookupCacheKey, Repo]) - type lookupCacheKey struct { proxy, path string } @@ -210,7 +205,7 @@ func Lookup(ctx context.Context, proxy, path string) Repo { defer logCall("Lookup(%q, %q)", proxy, path)() } - return lookupCache.Do(lookupCacheKey{proxy, path}, func() Repo { + return ModuleFetchState.lookupCache.Do(lookupCacheKey{proxy, path}, func() Repo { return newCachingRepo(ctx, path, func(ctx context.Context) (Repo, error) { r, err := lookup(ctx, proxy, path) if err == nil && traceRepo { diff --git a/src/cmd/go/internal/modfetch/zip_sum_test/zip_sum_test.go b/src/cmd/go/internal/modfetch/zip_sum_test/zip_sum_test.go index 16cc1457058..edae1d8f3cf 100644 --- a/src/cmd/go/internal/modfetch/zip_sum_test/zip_sum_test.go +++ b/src/cmd/go/internal/modfetch/zip_sum_test/zip_sum_test.go @@ -31,7 +31,6 @@ import ( "cmd/go/internal/cfg" "cmd/go/internal/modfetch" - "cmd/go/internal/modload" "golang.org/x/mod/module" ) @@ -94,7 +93,6 @@ func TestZipSums(t *testing.T) { cfg.GOPROXY = "direct" cfg.GOSUMDB = "off" - modload.Init() // Shard tests by downloading only every nth module when shard flags are set. // This makes it easier to test small groups of modules quickly. We avoid diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go index 25dbf3972fd..c8dc6e29bf6 100644 --- a/src/cmd/go/internal/modget/get.go +++ b/src/cmd/go/internal/modget/get.go @@ -273,6 +273,7 @@ func init() { } func runGet(ctx context.Context, cmd *base.Command, args []string) { + moduleLoaderState := modload.NewState() switch getU.version { case "", "upgrade", "patch": // ok @@ -298,7 +299,7 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { base.Fatalf("go: -insecure flag is no longer supported; use GOINSECURE instead") } - modload.ForceUseModules = true + moduleLoaderState.ForceUseModules = true // Do not allow any updating of go.mod until we've applied // all the requested changes and checked that the result matches @@ -307,14 +308,14 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { // Allow looking up modules for import paths when outside of a module. // 'go get' is expected to do this, unlike other commands. - modload.AllowMissingModuleImports() + moduleLoaderState.AllowMissingModuleImports() // 'go get' no longer builds or installs packages, so there's nothing to do // if there's no go.mod file. // TODO(#40775): make modload.Init return ErrNoModRoot instead of exiting. // We could handle that here by printing a different message. 
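runGet is representative of the whole change: it constructs a *modload.State up front and passes it to every call that previously consulted package globals. A reduced sketch of the shape of that refactoring, assuming only a ForceUseModules flag and a module root need carrying (the real modload.State holds much more):

	package main

	import "fmt"

	// State carries what were package-level globals (reduced sketch).
	type State struct {
		ForceUseModules bool
		modRoot         string
	}

	func NewState() *State { return new(State) }

	// Init stands in for modload.Init(loaderstate): configuration is read
	// from the injected *State rather than from mutable globals, so two
	// commands (or tests) can hold independent loader states.
	func Init(s *State) {
		if s.modRoot == "" && s.ForceUseModules {
			s.modRoot = "." // placeholder for go.mod discovery
		}
	}

	func (s *State) HasModRoot() bool { return s.modRoot != "" }

	func main() {
		s := NewState()
		s.ForceUseModules = true
		Init(s)
		fmt.Println(s.HasModRoot()) // true
	}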
- modload.Init() - if !modload.HasModRoot() { + modload.Init(moduleLoaderState) + if !moduleLoaderState.HasModRoot() { base.Fatalf("go: go.mod file not found in current directory or any parent directory.\n" + "\t'go get' is no longer supported outside a module.\n" + "\tTo build and install a command, use 'go install' with a version,\n" + @@ -323,7 +324,7 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { "\tor run 'go help get' or 'go help install'.") } - dropToolchain, queries := parseArgs(ctx, args) + dropToolchain, queries := parseArgs(moduleLoaderState, ctx, args) opts := modload.WriteOpts{ DropToolchain: dropToolchain, } @@ -333,17 +334,17 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { } } - r := newResolver(ctx, queries) - r.performLocalQueries(ctx) - r.performPathQueries(ctx) - r.performToolQueries(ctx) - r.performWorkQueries(ctx) + r := newResolver(moduleLoaderState, ctx, queries) + r.performLocalQueries(moduleLoaderState, ctx) + r.performPathQueries(moduleLoaderState, ctx) + r.performToolQueries(moduleLoaderState, ctx) + r.performWorkQueries(moduleLoaderState, ctx) for { - r.performWildcardQueries(ctx) - r.performPatternAllQueries(ctx) + r.performWildcardQueries(moduleLoaderState, ctx) + r.performPatternAllQueries(moduleLoaderState, ctx) - if changed := r.resolveQueries(ctx, queries); changed { + if changed := r.resolveQueries(moduleLoaderState, ctx, queries); changed { // 'go get' arguments can be (and often are) package patterns rather than // (just) modules. A package can be provided by any module with a prefix // of its import path, and a wildcard can even match packages in modules @@ -379,20 +380,20 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { // // - ambiguous import errors. // TODO(#27899): Try to resolve ambiguous import errors automatically. - upgrades := r.findAndUpgradeImports(ctx, queries) - if changed := r.applyUpgrades(ctx, upgrades); changed { + upgrades := r.findAndUpgradeImports(moduleLoaderState, ctx, queries) + if changed := r.applyUpgrades(moduleLoaderState, ctx, upgrades); changed { continue } - r.findMissingWildcards(ctx) - if changed := r.resolveQueries(ctx, r.wildcardQueries); changed { + r.findMissingWildcards(moduleLoaderState, ctx) + if changed := r.resolveQueries(moduleLoaderState, ctx, r.wildcardQueries); changed { continue } break } - r.checkWildcardVersions(ctx) + r.checkWildcardVersions(moduleLoaderState, ctx) var pkgPatterns []string for _, q := range queries { @@ -403,36 +404,36 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { // If a workspace applies, checkPackageProblems will switch to the workspace // using modload.EnterWorkspace when doing the final load, and then switch back. - r.checkPackageProblems(ctx, pkgPatterns) + r.checkPackageProblems(moduleLoaderState, ctx, pkgPatterns) if *getTool { - updateTools(ctx, queries, &opts) + updateTools(moduleLoaderState, ctx, queries, &opts) } // Everything succeeded. Update go.mod. - oldReqs := reqsFromGoMod(modload.ModFile()) + oldReqs := reqsFromGoMod(modload.ModFile(moduleLoaderState)) - if err := modload.WriteGoMod(ctx, opts); err != nil { + if err := modload.WriteGoMod(moduleLoaderState, ctx, opts); err != nil { // A TooNewError can happen for 'go get go@newversion' // when all the required modules are old enough // but the command line is not. // TODO(bcmills): modload.EditBuildList should catch this instead, // and then this can be changed to base.Fatal(err). 
- toolchain.SwitchOrFatal(ctx, err) + toolchain.SwitchOrFatal(moduleLoaderState, ctx, err) } - newReqs := reqsFromGoMod(modload.ModFile()) + newReqs := reqsFromGoMod(modload.ModFile(moduleLoaderState)) r.reportChanges(oldReqs, newReqs) - if gowork := modload.FindGoWork(base.Cwd()); gowork != "" { + if gowork := moduleLoaderState.FindGoWork(base.Cwd()); gowork != "" { wf, err := modload.ReadWorkFile(gowork) - if err == nil && modload.UpdateWorkGoVersion(wf, modload.MainModules.GoVersion()) { + if err == nil && modload.UpdateWorkGoVersion(wf, moduleLoaderState.MainModules.GoVersion(moduleLoaderState)) { modload.WriteWorkFile(gowork, wf) } } } -func updateTools(ctx context.Context, queries []*query, opts *modload.WriteOpts) { +func updateTools(loaderstate *modload.State, ctx context.Context, queries []*query, opts *modload.WriteOpts) { pkgOpts := modload.PackageOpts{ VendorModulesInGOROOTSrc: true, LoadTests: *getT, @@ -448,7 +449,7 @@ func updateTools(ctx context.Context, queries []*query, opts *modload.WriteOpts) patterns = append(patterns, q.pattern) } - matches, _ := modload.LoadPackages(ctx, pkgOpts, patterns...) + matches, _ := modload.LoadPackages(loaderstate, ctx, pkgOpts, patterns...) for i, m := range matches { if queries[i].version == "none" { opts.DropTools = append(opts.DropTools, m.Pkgs...) @@ -462,11 +463,11 @@ func updateTools(ctx context.Context, queries []*query, opts *modload.WriteOpts) // // The command-line arguments are of the form path@version or simply path, with // implicit @upgrade. path@none is "downgrade away". -func parseArgs(ctx context.Context, rawArgs []string) (dropToolchain bool, queries []*query) { +func parseArgs(loaderstate *modload.State, ctx context.Context, rawArgs []string) (dropToolchain bool, queries []*query) { defer base.ExitIfErrors() for _, arg := range search.CleanPatterns(rawArgs) { - q, err := newQuery(arg) + q, err := newQuery(loaderstate, arg) if err != nil { base.Error(err) continue @@ -553,12 +554,12 @@ type matchInModuleKey struct { m module.Version } -func newResolver(ctx context.Context, queries []*query) *resolver { +func newResolver(loaderstate *modload.State, ctx context.Context, queries []*query) *resolver { // LoadModGraph also sets modload.Target, which is needed by various resolver // methods. - mg, err := modload.LoadModGraph(ctx, "") + mg, err := modload.LoadModGraph(loaderstate, ctx, "") if err != nil { - toolchain.SwitchOrFatal(ctx, err) + toolchain.SwitchOrFatal(loaderstate, ctx, err) } buildList := mg.BuildList() @@ -574,7 +575,7 @@ func newResolver(ctx context.Context, queries []*query) *resolver { buildListVersion: initialVersion, initialVersion: initialVersion, nonesByPath: map[string]*query{}, - workspace: loadWorkspace(modload.FindGoWork(base.Cwd())), + workspace: loadWorkspace(loaderstate.FindGoWork(base.Cwd())), } for _, q := range queries { @@ -643,9 +644,9 @@ func (r *resolver) noneForPath(mPath string) (nq *query, found bool) { // queryModule wraps modload.Query, substituting r.checkAllowedOr to decide // allowed versions. 
-func (r *resolver) queryModule(ctx context.Context, mPath, query string, selected func(string) string) (module.Version, error) { +func (r *resolver) queryModule(loaderstate *modload.State, ctx context.Context, mPath, query string, selected func(string) string) (module.Version, error) { current := r.initialSelected(mPath) - rev, err := modload.Query(ctx, mPath, query, current, r.checkAllowedOr(query, selected)) + rev, err := modload.Query(loaderstate, ctx, mPath, query, current, r.checkAllowedOr(loaderstate, query, selected)) if err != nil { return module.Version{}, err } @@ -654,8 +655,8 @@ func (r *resolver) queryModule(ctx context.Context, mPath, query string, selecte // queryPackages wraps modload.QueryPackage, substituting r.checkAllowedOr to // decide allowed versions. -func (r *resolver) queryPackages(ctx context.Context, pattern, query string, selected func(string) string) (pkgMods []module.Version, err error) { - results, err := modload.QueryPackages(ctx, pattern, query, selected, r.checkAllowedOr(query, selected)) +func (r *resolver) queryPackages(loaderstate *modload.State, ctx context.Context, pattern, query string, selected func(string) string) (pkgMods []module.Version, err error) { + results, err := modload.QueryPackages(loaderstate, ctx, pattern, query, selected, r.checkAllowedOr(loaderstate, query, selected)) if len(results) > 0 { pkgMods = make([]module.Version, 0, len(results)) for _, qr := range results { @@ -667,8 +668,8 @@ func (r *resolver) queryPackages(ctx context.Context, pattern, query string, sel // queryPattern wraps modload.QueryPattern, substituting r.checkAllowedOr to // decide allowed versions. -func (r *resolver) queryPattern(ctx context.Context, pattern, query string, selected func(string) string) (pkgMods []module.Version, mod module.Version, err error) { - results, modOnly, err := modload.QueryPattern(ctx, pattern, query, selected, r.checkAllowedOr(query, selected)) +func (r *resolver) queryPattern(loaderstate *modload.State, ctx context.Context, pattern, query string, selected func(string) string) (pkgMods []module.Version, mod module.Version, err error) { + results, modOnly, err := modload.QueryPattern(loaderstate, ctx, pattern, query, selected, r.checkAllowedOr(loaderstate, query, selected)) if len(results) > 0 { pkgMods = make([]module.Version, 0, len(results)) for _, qr := range results { @@ -683,22 +684,22 @@ func (r *resolver) queryPattern(ctx context.Context, pattern, query string, sele // checkAllowedOr is like modload.CheckAllowed, but it always allows the requested // and current versions (even if they are retracted or otherwise excluded). -func (r *resolver) checkAllowedOr(requested string, selected func(string) string) modload.AllowedFunc { +func (r *resolver) checkAllowedOr(s *modload.State, requested string, selected func(string) string) modload.AllowedFunc { return func(ctx context.Context, m module.Version) error { if m.Version == requested { - return modload.CheckExclusions(ctx, m) + return s.CheckExclusions(ctx, m) } if (requested == "upgrade" || requested == "patch") && m.Version == selected(m.Path) { return nil } - return modload.CheckAllowed(ctx, m) + return s.CheckAllowed(ctx, m) } } // matchInModule is a caching wrapper around modload.MatchInModule. 
-func (r *resolver) matchInModule(ctx context.Context, pattern string, m module.Version) (packages []string, err error) { +func (r *resolver) matchInModule(loaderstate *modload.State, ctx context.Context, pattern string, m module.Version) (packages []string, err error) { return r.matchInModuleCache.Do(matchInModuleKey{pattern, m}, func() ([]string, error) { - match := modload.MatchInModule(ctx, pattern, m, imports.AnyTags()) + match := modload.MatchInModule(loaderstate, ctx, pattern, m, imports.AnyTags()) if len(match.Errs) > 0 { return match.Pkgs, match.Errs[0] } @@ -714,15 +715,15 @@ func (r *resolver) matchInModule(ctx context.Context, pattern string, m module.V // modules second. However, no module exists at version "none", and therefore no // package exists at that version either: we know that the argument cannot match // any packages, and thus it must match modules instead. -func (r *resolver) queryNone(ctx context.Context, q *query) { +func (r *resolver) queryNone(loaderstate *modload.State, ctx context.Context, q *query) { if search.IsMetaPackage(q.pattern) { panic(fmt.Sprintf("internal error: queryNone called with pattern %q", q.pattern)) } if !q.isWildcard() { q.pathOnce(q.pattern, func() pathSet { - hasModRoot := modload.HasModRoot() - if hasModRoot && modload.MainModules.Contains(q.pattern) { + hasModRoot := loaderstate.HasModRoot() + if hasModRoot && loaderstate.MainModules.Contains(q.pattern) { v := module.Version{Path: q.pattern} // The user has explicitly requested to downgrade their own module to // version "none". This is not an entirely unreasonable request: it @@ -734,7 +735,12 @@ func (r *resolver) queryNone(ctx context.Context, q *query) { // However, neither of those behaviors would be consistent with the // plain meaning of the query. To try to reduce confusion, reject the // query explicitly. - return errSet(&modload.QueryMatchesMainModulesError{MainModules: []module.Version{v}, Pattern: q.pattern, Query: q.version}) + return errSet(&modload.QueryMatchesMainModulesError{ + MainModules: []module.Version{v}, + Pattern: q.pattern, + Query: q.version, + PatternIsModule: loaderstate.MainModules.Contains(q.pattern), + }) } return pathSet{mod: module.Version{Path: q.pattern, Version: "none"}} @@ -746,15 +752,20 @@ func (r *resolver) queryNone(ctx context.Context, q *query) { continue } q.pathOnce(curM.Path, func() pathSet { - if modload.HasModRoot() && curM.Version == "" && modload.MainModules.Contains(curM.Path) { - return errSet(&modload.QueryMatchesMainModulesError{MainModules: []module.Version{curM}, Pattern: q.pattern, Query: q.version}) + if loaderstate.HasModRoot() && curM.Version == "" && loaderstate.MainModules.Contains(curM.Path) { + return errSet(&modload.QueryMatchesMainModulesError{ + MainModules: []module.Version{curM}, + Pattern: q.pattern, + Query: q.version, + PatternIsModule: loaderstate.MainModules.Contains(q.pattern), + }) } return pathSet{mod: module.Version{Path: curM.Path, Version: "none"}} }) } } -func (r *resolver) performLocalQueries(ctx context.Context) { +func (r *resolver) performLocalQueries(loaderstate *modload.State, ctx context.Context) { for _, q := range r.localQueries { q.pathOnce(q.pattern, func() pathSet { absDetail := "" @@ -766,13 +777,13 @@ func (r *resolver) performLocalQueries(ctx context.Context) { // Absolute paths like C:\foo and relative paths like ../foo... are // restricted to matching packages in the main module. 
- pkgPattern, mainModule := modload.MainModules.DirImportPath(ctx, q.pattern) + pkgPattern, mainModule := loaderstate.MainModules.DirImportPath(loaderstate, ctx, q.pattern) if pkgPattern == "." { - modload.MustHaveModRoot() - versions := modload.MainModules.Versions() + loaderstate.MustHaveModRoot() + versions := loaderstate.MainModules.Versions() modRoots := make([]string, 0, len(versions)) for _, m := range versions { - modRoots = append(modRoots, modload.MainModules.ModRoot(m)) + modRoots = append(modRoots, loaderstate.MainModules.ModRoot(m)) } var plural string if len(modRoots) != 1 { @@ -781,7 +792,7 @@ func (r *resolver) performLocalQueries(ctx context.Context) { return errSet(fmt.Errorf("%s%s is not within module%s rooted at %s", q.pattern, absDetail, plural, strings.Join(modRoots, ", "))) } - match := modload.MatchInModule(ctx, pkgPattern, mainModule, imports.AnyTags()) + match := modload.MatchInModule(loaderstate, ctx, pkgPattern, mainModule, imports.AnyTags()) if len(match.Errs) > 0 { return pathSet{err: match.Errs[0]} } @@ -791,8 +802,8 @@ func (r *resolver) performLocalQueries(ctx context.Context) { return errSet(fmt.Errorf("no package to get in current directory")) } if !q.isWildcard() { - modload.MustHaveModRoot() - return errSet(fmt.Errorf("%s%s is not a package in module rooted at %s", q.pattern, absDetail, modload.MainModules.ModRoot(mainModule))) + loaderstate.MustHaveModRoot() + return errSet(fmt.Errorf("%s%s is not a package in module rooted at %s", q.pattern, absDetail, loaderstate.MainModules.ModRoot(mainModule))) } search.WarnUnmatched([]*search.Match{match}) return pathSet{} @@ -811,14 +822,14 @@ func (r *resolver) performLocalQueries(ctx context.Context) { // of modules may be expanded by other queries, so wildcard queries need to be // re-evaluated whenever a potentially-matching module path is added to the // build list. -func (r *resolver) performWildcardQueries(ctx context.Context) { +func (r *resolver) performWildcardQueries(loaderstate *modload.State, ctx context.Context) { for _, q := range r.wildcardQueries { q := q r.work.Add(func() { if q.version == "none" { - r.queryNone(ctx, q) + r.queryNone(loaderstate, ctx, q) } else { - r.queryWildcard(ctx, q) + r.queryWildcard(loaderstate, ctx, q) } }) } @@ -830,7 +841,7 @@ func (r *resolver) performWildcardQueries(ctx context.Context) { // - that module exists at some version matching q.version, and // - either the module path itself matches q.pattern, or some package within // the module at q.version matches q.pattern. -func (r *resolver) queryWildcard(ctx context.Context, q *query) { +func (r *resolver) queryWildcard(loaderstate *modload.State, ctx context.Context, q *query) { // For wildcard patterns, modload.QueryPattern only identifies modules // matching the prefix of the path before the wildcard. 
However, the build // list may already contain other modules with matching packages, and we @@ -848,16 +859,17 @@ func (r *resolver) queryWildcard(ctx context.Context, q *query) { return pathSet{} } - if modload.MainModules.Contains(curM.Path) && !versionOkForMainModule(q.version) { + if loaderstate.MainModules.Contains(curM.Path) && !versionOkForMainModule(q.version) { if q.matchesPath(curM.Path) { return errSet(&modload.QueryMatchesMainModulesError{ - MainModules: []module.Version{curM}, - Pattern: q.pattern, - Query: q.version, + MainModules: []module.Version{curM}, + Pattern: q.pattern, + Query: q.version, + PatternIsModule: loaderstate.MainModules.Contains(q.pattern), }) } - packages, err := r.matchInModule(ctx, q.pattern, curM) + packages, err := r.matchInModule(loaderstate, ctx, q.pattern, curM) if err != nil { return errSet(err) } @@ -869,10 +881,10 @@ func (r *resolver) queryWildcard(ctx context.Context, q *query) { }) } - return r.tryWildcard(ctx, q, curM) + return r.tryWildcard(loaderstate, ctx, q, curM) } - m, err := r.queryModule(ctx, curM.Path, q.version, r.initialSelected) + m, err := r.queryModule(loaderstate, ctx, curM.Path, q.version, r.initialSelected) if err != nil { if !isNoSuchModuleVersion(err) { // We can't tell whether a matching version exists. @@ -894,7 +906,7 @@ func (r *resolver) queryWildcard(ctx context.Context, q *query) { return pathSet{} } - return r.tryWildcard(ctx, q, m) + return r.tryWildcard(loaderstate, ctx, q, m) }) } @@ -905,9 +917,9 @@ func (r *resolver) queryWildcard(ctx context.Context, q *query) { // tryWildcard returns a pathSet for module m matching query q. // If m does not actually match q, tryWildcard returns an empty pathSet. -func (r *resolver) tryWildcard(ctx context.Context, q *query, m module.Version) pathSet { +func (r *resolver) tryWildcard(loaderstate *modload.State, ctx context.Context, q *query, m module.Version) pathSet { mMatches := q.matchesPath(m.Path) - packages, err := r.matchInModule(ctx, q.pattern, m) + packages, err := r.matchInModule(loaderstate, ctx, q.pattern, m) if err != nil { return errSet(err) } @@ -922,14 +934,14 @@ func (r *resolver) tryWildcard(ctx context.Context, q *query, m module.Version) // findMissingWildcards adds a candidate set for each query in r.wildcardQueries // that has not yet resolved to any version containing packages. -func (r *resolver) findMissingWildcards(ctx context.Context) { +func (r *resolver) findMissingWildcards(loaderstate *modload.State, ctx context.Context) { for _, q := range r.wildcardQueries { if q.version == "none" || q.matchesPackages { continue // q is not “missing” } r.work.Add(func() { q.pathOnce(q.pattern, func() pathSet { - pkgMods, mod, err := r.queryPattern(ctx, q.pattern, q.version, r.initialSelected) + pkgMods, mod, err := r.queryPattern(loaderstate, ctx, q.pattern, q.version, r.initialSelected) if err != nil { if isNoSuchPackageVersion(err) && len(q.resolved) > 0 { // q already resolved one or more modules but matches no packages. @@ -950,7 +962,7 @@ func (r *resolver) findMissingWildcards(ctx context.Context) { // checkWildcardVersions reports an error if any module in the build list has a // path (or contains a package) matching a query with a wildcard pattern, but // has a selected version that does *not* match the query. 
-func (r *resolver) checkWildcardVersions(ctx context.Context) { +func (r *resolver) checkWildcardVersions(loaderstate *modload.State, ctx context.Context) { defer base.ExitIfErrors() for _, q := range r.wildcardQueries { @@ -959,7 +971,7 @@ func (r *resolver) checkWildcardVersions(ctx context.Context) { continue } if !q.matchesPath(curM.Path) { - packages, err := r.matchInModule(ctx, q.pattern, curM) + packages, err := r.matchInModule(loaderstate, ctx, q.pattern, curM) if len(packages) == 0 { if err != nil { reportError(q, err) @@ -968,7 +980,7 @@ func (r *resolver) checkWildcardVersions(ctx context.Context) { } } - rev, err := r.queryModule(ctx, curM.Path, q.version, r.initialSelected) + rev, err := r.queryModule(loaderstate, ctx, curM.Path, q.version, r.initialSelected) if err != nil { reportError(q, err) continue @@ -979,7 +991,7 @@ func (r *resolver) checkWildcardVersions(ctx context.Context) { if !q.matchesPath(curM.Path) { m := module.Version{Path: curM.Path, Version: rev.Version} - packages, err := r.matchInModule(ctx, q.pattern, m) + packages, err := r.matchInModule(loaderstate, ctx, q.pattern, m) if err != nil { reportError(q, err) continue @@ -1013,14 +1025,14 @@ func (r *resolver) checkWildcardVersions(ctx context.Context) { // The candidate packages and modules for path literals depend only on the // initial build list, not the current build list, so we only need to query path // literals once. -func (r *resolver) performPathQueries(ctx context.Context) { +func (r *resolver) performPathQueries(loaderstate *modload.State, ctx context.Context) { for _, q := range r.pathQueries { q := q r.work.Add(func() { if q.version == "none" { - r.queryNone(ctx, q) + r.queryNone(loaderstate, ctx, q) } else { - r.queryPath(ctx, q) + r.queryPath(loaderstate, ctx, q) } }) } @@ -1031,7 +1043,7 @@ func (r *resolver) performPathQueries(ctx context.Context) { // The candidate set consists of all modules that could provide q.pattern // and have a version matching q, plus (if it exists) the module whose path // is itself q.pattern (at a matching version). -func (r *resolver) queryPath(ctx context.Context, q *query) { +func (r *resolver) queryPath(loaderstate *modload.State, ctx context.Context, q *query) { q.pathOnce(q.pattern, func() pathSet { if search.IsMetaPackage(q.pattern) || q.isWildcard() { panic(fmt.Sprintf("internal error: queryPath called with pattern %q", q.pattern)) @@ -1042,7 +1054,7 @@ func (r *resolver) queryPath(ctx context.Context, q *query) { if search.IsStandardImportPath(q.pattern) { stdOnly := module.Version{} - packages, _ := r.matchInModule(ctx, q.pattern, stdOnly) + packages, _ := r.matchInModule(loaderstate, ctx, q.pattern, stdOnly) if len(packages) > 0 { if q.rawVersion != "" { return errSet(fmt.Errorf("can't request explicit version %q of standard library package %s", q.version, q.pattern)) @@ -1053,7 +1065,7 @@ func (r *resolver) queryPath(ctx context.Context, q *query) { } } - pkgMods, mod, err := r.queryPattern(ctx, q.pattern, q.version, r.initialSelected) + pkgMods, mod, err := r.queryPattern(loaderstate, ctx, q.pattern, q.version, r.initialSelected) if err != nil { return errSet(err) } @@ -1063,11 +1075,11 @@ func (r *resolver) queryPath(ctx context.Context, q *query) { // performToolQueries populates the candidates for each query whose // pattern is "tool". 
-func (r *resolver) performToolQueries(ctx context.Context) { +func (r *resolver) performToolQueries(loaderstate *modload.State, ctx context.Context) { for _, q := range r.toolQueries { - for tool := range modload.MainModules.Tools() { + for tool := range loaderstate.MainModules.Tools() { q.pathOnce(tool, func() pathSet { - pkgMods, err := r.queryPackages(ctx, tool, q.version, r.initialSelected) + pkgMods, err := r.queryPackages(loaderstate, ctx, tool, q.version, r.initialSelected) return pathSet{pkgMods: pkgMods, err: err} }) } @@ -1076,22 +1088,22 @@ func (r *resolver) performToolQueries(ctx context.Context) { // performWorkQueries populates the candidates for each query whose pattern is "work". // The candidate module to resolve the work pattern is exactly the single main module. -func (r *resolver) performWorkQueries(ctx context.Context) { +func (r *resolver) performWorkQueries(loaderstate *modload.State, ctx context.Context) { for _, q := range r.workQueries { q.pathOnce(q.pattern, func() pathSet { // TODO(matloob): Maybe export MainModules.mustGetSingleMainModule and call that. // There are a few other places outside the modload package where we expect // a single main module. - if len(modload.MainModules.Versions()) != 1 { + if len(loaderstate.MainModules.Versions()) != 1 { panic("internal error: number of main modules is not exactly one in resolution phase of go get") } - mainModule := modload.MainModules.Versions()[0] + mainModule := loaderstate.MainModules.Versions()[0] // We know what the result is going to be, assuming the main module is not // empty, (it's the main module itself) but first check to see that there // are packages in the main module, so that if there aren't any, we can // return the expected warning that the pattern matched no packages. - match := modload.MatchInModule(ctx, q.pattern, mainModule, imports.AnyTags()) + match := modload.MatchInModule(loaderstate, ctx, q.pattern, mainModule, imports.AnyTags()) if len(match.Errs) > 0 { return pathSet{err: match.Errs[0]} } @@ -1113,7 +1125,7 @@ func (r *resolver) performWorkQueries(ctx context.Context) { // know which candidate is selected — and that selection may depend on the // results of other queries. We need to re-evaluate the "all" queries whenever // the module for one or more packages in "all" are resolved. -func (r *resolver) performPatternAllQueries(ctx context.Context) { +func (r *resolver) performPatternAllQueries(loaderstate *modload.State, ctx context.Context) { if len(r.patternAllQueries) == 0 { return } @@ -1122,7 +1134,7 @@ func (r *resolver) performPatternAllQueries(ctx context.Context) { versionOk = true for _, q := range r.patternAllQueries { q.pathOnce(path, func() pathSet { - pkgMods, err := r.queryPackages(ctx, path, q.version, r.initialSelected) + pkgMods, err := r.queryPackages(loaderstate, ctx, path, q.version, r.initialSelected) if len(pkgMods) != 1 || pkgMods[0] != m { // There are candidates other than m for the given path, so we can't // be certain that m will actually be the module selected to provide @@ -1137,7 +1149,7 @@ func (r *resolver) performPatternAllQueries(ctx context.Context) { return versionOk } - r.loadPackages(ctx, []string{"all"}, findPackage) + r.loadPackages(loaderstate, ctx, []string{"all"}, findPackage) // Since we built up the candidate lists concurrently, they may be in a // nondeterministic order. 
We want 'go get' to be fully deterministic, @@ -1157,7 +1169,7 @@ func (r *resolver) performPatternAllQueries(ctx context.Context) { // If the getU flag ("-u") is set, findAndUpgradeImports also returns a // pathSet for each module that is not constrained by any other // command-line argument and has an available matching upgrade. -func (r *resolver) findAndUpgradeImports(ctx context.Context, queries []*query) (upgrades []pathSet) { +func (r *resolver) findAndUpgradeImports(loaderstate *modload.State, ctx context.Context, queries []*query) (upgrades []pathSet) { patterns := make([]string, 0, len(queries)) for _, q := range queries { if q.matchesPackages { @@ -1198,7 +1210,7 @@ func (r *resolver) findAndUpgradeImports(ctx context.Context, queries []*query) // - The "-u" flag, unlike other arguments, does not cause version // conflicts with other queries. (The other query always wins.) - pkgMods, err := r.queryPackages(ctx, path, version, r.selected) + pkgMods, err := r.queryPackages(loaderstate, ctx, path, version, r.selected) for _, u := range pkgMods { if u == m { // The selected package version is already upgraded appropriately; there @@ -1229,7 +1241,7 @@ func (r *resolver) findAndUpgradeImports(ctx context.Context, queries []*query) return false } - r.loadPackages(ctx, patterns, findPackage) + r.loadPackages(loaderstate, ctx, patterns, findPackage) // Since we built up the candidate lists concurrently, they may be in a // nondeterministic order. We want 'go get' to be fully deterministic, @@ -1253,14 +1265,14 @@ func (r *resolver) findAndUpgradeImports(ctx context.Context, queries []*query) // loadPackages also invokes the findPackage function for each imported package // that is neither present in the standard library nor in any module in the // build list. -func (r *resolver) loadPackages(ctx context.Context, patterns []string, findPackage func(ctx context.Context, path string, m module.Version) (versionOk bool)) { +func (r *resolver) loadPackages(loaderstate *modload.State, ctx context.Context, patterns []string, findPackage func(ctx context.Context, path string, m module.Version) (versionOk bool)) { opts := modload.PackageOpts{ Tags: imports.AnyTags(), VendorModulesInGOROOTSrc: true, LoadTests: *getT, AssumeRootsImported: true, // After 'go get foo', imports of foo should build. SilencePackageErrors: true, // May be fixed by subsequent upgrades or downgrades. - Switcher: new(toolchain.Switcher), + Switcher: toolchain.NewSwitcher(loaderstate), } opts.AllowPackage = func(ctx context.Context, path string, m module.Version) error { @@ -1275,13 +1287,13 @@ func (r *resolver) loadPackages(ctx context.Context, patterns []string, findPack return nil } - _, pkgs := modload.LoadPackages(ctx, opts, patterns...) + _, pkgs := modload.LoadPackages(loaderstate, ctx, opts, patterns...) for _, pkgPath := range pkgs { const ( parentPath = "" parentIsStd = false ) - _, _, err := modload.Lookup(parentPath, parentIsStd, pkgPath) + _, _, err := modload.Lookup(loaderstate, parentPath, parentIsStd, pkgPath) if err == nil { continue } @@ -1294,15 +1306,13 @@ func (r *resolver) loadPackages(ctx context.Context, patterns []string, findPack continue } - var ( - importMissing *modload.ImportMissingError - ambiguous *modload.AmbiguousImportError - ) - if !errors.As(err, &importMissing) && !errors.As(err, &ambiguous) { - // The package, which is a dependency of something we care about, has some - // problem that we can't resolve with a version change. - // Leave the error for the final LoadPackages call. 
- continue + if _, ok := errors.AsType[*modload.ImportMissingError](err); !ok { + if _, ok := errors.AsType[*modload.AmbiguousImportError](err); !ok { + // The package, which is a dependency of something we care about, has some + // problem that we can't resolve with a version change. + // Leave the error for the final LoadPackages call. + continue + } } path := pkgPath @@ -1329,7 +1339,7 @@ var errVersionChange = errors.New("version change needed") // // If all pathSets are resolved without any changes to the build list, // resolveQueries returns with changed=false. -func (r *resolver) resolveQueries(ctx context.Context, queries []*query) (changed bool) { +func (r *resolver) resolveQueries(loaderstate *modload.State, ctx context.Context, queries []*query) (changed bool) { defer base.ExitIfErrors() // Note: this is O(N²) with the number of pathSets in the worst case. @@ -1348,7 +1358,7 @@ func (r *resolver) resolveQueries(ctx context.Context, queries []*query) (change // If we found modules that were too new, find the max of the required versions // and then try to switch to a newer toolchain. - var sw toolchain.Switcher + sw := toolchain.NewSwitcher(loaderstate) for _, q := range queries { for _, cs := range q.candidates { sw.Error(cs.err) @@ -1373,7 +1383,7 @@ func (r *resolver) resolveQueries(ctx context.Context, queries []*query) (change continue } - filtered, isPackage, m, unique := r.disambiguate(cs) + filtered, isPackage, m, unique := r.disambiguate(loaderstate, cs) if !unique { unresolved = append(unresolved, filtered) continue @@ -1387,7 +1397,7 @@ func (r *resolver) resolveQueries(ctx context.Context, queries []*query) (change if isPackage { q.matchesPackages = true } - r.resolve(q, m) + r.resolve(loaderstate, q, m) resolved++ } @@ -1401,7 +1411,7 @@ func (r *resolver) resolveQueries(ctx context.Context, queries []*query) (change } if resolved > 0 { - if changed = r.updateBuildList(ctx, nil); changed { + if changed = r.updateBuildList(loaderstate, ctx, nil); changed { // The build list has changed, so disregard any remaining ambiguous queries: // they might now be determined by requirements in the build list, which we // would prefer to use instead of arbitrary versions. @@ -1423,12 +1433,12 @@ func (r *resolver) resolveQueries(ctx context.Context, queries []*query) (change if isPackage { q.matchesPackages = true } - r.resolve(q, m) + r.resolve(loaderstate, q, m) resolvedArbitrarily++ } } if resolvedArbitrarily > 0 { - changed = r.updateBuildList(ctx, nil) + changed = r.updateBuildList(loaderstate, ctx, nil) } return changed } @@ -1444,7 +1454,7 @@ func (r *resolver) resolveQueries(ctx context.Context, queries []*query) (change // // If all pathSets are resolved without any changes to the build list, // applyUpgrades returns with changed=false. 
-func (r *resolver) applyUpgrades(ctx context.Context, upgrades []pathSet) (changed bool) { +func (r *resolver) applyUpgrades(loaderstate *modload.State, ctx context.Context, upgrades []pathSet) (changed bool) { defer base.ExitIfErrors() // Arbitrarily add a "latest" version that provides each missing package, but @@ -1457,7 +1467,7 @@ func (r *resolver) applyUpgrades(ctx context.Context, upgrades []pathSet) (chang continue } - filtered, _, m, unique := r.disambiguate(cs) + filtered, _, m, unique := r.disambiguate(loaderstate, cs) if !unique { _, m = r.chooseArbitrarily(filtered) } @@ -1470,7 +1480,7 @@ func (r *resolver) applyUpgrades(ctx context.Context, upgrades []pathSet) (chang } base.ExitIfErrors() - changed = r.updateBuildList(ctx, tentative) + changed = r.updateBuildList(loaderstate, ctx, tentative) return changed } @@ -1484,7 +1494,7 @@ func (r *resolver) applyUpgrades(ctx context.Context, upgrades []pathSet) (chang // In the vast majority of cases, we expect only one module per pathSet, // but we want to give some minimal additional tools so that users can add an // extra argument or two on the command line to resolve simple ambiguities. -func (r *resolver) disambiguate(cs pathSet) (filtered pathSet, isPackage bool, m module.Version, unique bool) { +func (r *resolver) disambiguate(s *modload.State, cs pathSet) (filtered pathSet, isPackage bool, m module.Version, unique bool) { if len(cs.pkgMods) == 0 && cs.mod.Path == "" { panic("internal error: resolveIfUnambiguous called with empty pathSet") } @@ -1496,7 +1506,7 @@ func (r *resolver) disambiguate(cs pathSet) (filtered pathSet, isPackage bool, m continue } - if modload.MainModules.Contains(m.Path) { + if s.MainModules.Contains(m.Path) { if m.Version == "" { return pathSet{}, true, m, true } @@ -1601,7 +1611,7 @@ func (r *resolver) chooseArbitrarily(cs pathSet) (isPackage bool, m module.Versi // We skip missing-package errors earlier in the process, since we want to // resolve pathSets ourselves, but at that point, we don't have enough context // to log the package-import chains leading to each error. -func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []string) { +func (r *resolver) checkPackageProblems(loaderstate *modload.State, ctx context.Context, pkgPatterns []string) { defer base.ExitIfErrors() // Enter workspace mode, if the current main module would belong to it, when @@ -1612,16 +1622,16 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin // info, but switch back to single module mode when fetching sums so that we update // the single module's go.sum file. var exitWorkspace func() - if r.workspace != nil && r.workspace.hasModule(modload.MainModules.Versions()[0].Path) { + if r.workspace != nil && r.workspace.hasModule(loaderstate.MainModules.Versions()[0].Path) { var err error - exitWorkspace, err = modload.EnterWorkspace(ctx) + exitWorkspace, err = modload.EnterWorkspace(loaderstate, ctx) if err != nil { // A TooNewError can happen for // go get go@newversion when all the required modules // are old enough but the go command itself is not new // enough. See the related comment on the SwitchOrFatal // in runGet when WriteGoMod returns an error. - toolchain.SwitchOrFatal(ctx, err) + toolchain.SwitchOrFatal(loaderstate, ctx, err) } } @@ -1653,7 +1663,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin AllowErrors: true, SilenceNoGoErrors: true, } - matches, pkgs := modload.LoadPackages(ctx, pkgOpts, pkgPatterns...) 
+ matches, pkgs := modload.LoadPackages(loaderstate, ctx, pkgOpts, pkgPatterns...) for _, m := range matches { if len(m.Errs) > 0 { base.SetExitStatus(1) @@ -1661,7 +1671,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin } } for _, pkg := range pkgs { - if dir, _, err := modload.Lookup("", false, pkg); err != nil { + if dir, _, err := modload.Lookup(loaderstate, "", false, pkg); err != nil { if dir != "" && errors.Is(err, imports.ErrNoGo) { // Since dir is non-empty, we must have located source files // associated with either the package or its test — ErrNoGo must @@ -1674,7 +1684,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin } base.SetExitStatus(1) - if ambiguousErr := (*modload.AmbiguousImportError)(nil); errors.As(err, &ambiguousErr) { + if ambiguousErr, ok := errors.AsType[*modload.AmbiguousImportError](err); ok { for _, m := range ambiguousErr.Modules { relevantMods[m] |= hasPkg } @@ -1692,7 +1702,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin } } - reqs := modload.LoadModFile(ctx) + reqs := modload.LoadModFile(loaderstate, ctx) for m := range relevantMods { if reqs.IsDirect(m.Path) { relevantMods[m] |= direct @@ -1716,8 +1726,8 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin for i := range retractions { i := i r.work.Add(func() { - err := modload.CheckRetractions(ctx, retractions[i].m) - if retractErr := (*modload.ModuleRetractedError)(nil); errors.As(err, &retractErr) { + err := loaderstate.CheckRetractions(ctx, retractions[i].m) + if _, ok := errors.AsType[*modload.ModuleRetractedError](err); ok { retractions[i].message = err.Error() } }) @@ -1737,7 +1747,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin for i := range deprecations { i := i r.work.Add(func() { - deprecation, err := modload.CheckDeprecation(ctx, deprecations[i].m) + deprecation, err := modload.CheckDeprecation(loaderstate, ctx, deprecations[i].m) if err != nil || deprecation == "" { return } @@ -1767,7 +1777,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin i := i m := r.buildList[i] mActual := m - if mRepl := modload.Replacement(m); mRepl.Path != "" { + if mRepl := modload.Replacement(loaderstate, m); mRepl.Path != "" { mActual = mRepl } old := module.Version{Path: m.Path, Version: r.initialVersion[m.Path]} @@ -1775,7 +1785,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin continue } oldActual := old - if oldRepl := modload.Replacement(old); oldRepl.Path != "" { + if oldRepl := modload.Replacement(loaderstate, old); oldRepl.Path != "" { oldActual = oldRepl } if mActual == oldActual || mActual.Version == "" || !modfetch.HaveSum(oldActual) { @@ -1946,16 +1956,17 @@ func (r *resolver) reportChanges(oldReqs, newReqs []module.Version) { // resolve records that module m must be at its indicated version (which may be // "none") due to query q. If some other query forces module m to be at a // different version, resolve reports a conflict error. 
-func (r *resolver) resolve(q *query, m module.Version) { +func (r *resolver) resolve(s *modload.State, q *query, m module.Version) { if m.Path == "" { panic("internal error: resolving a module.Version with an empty path") } - if modload.MainModules.Contains(m.Path) && m.Version != "" { + if s.MainModules.Contains(m.Path) && m.Version != "" { reportError(q, &modload.QueryMatchesMainModulesError{ - MainModules: []module.Version{{Path: m.Path}}, - Pattern: q.pattern, - Query: q.version, + MainModules: []module.Version{{Path: m.Path}}, + Pattern: q.pattern, + Query: q.version, + PatternIsModule: s.MainModules.Contains(q.pattern), }) return } @@ -1978,24 +1989,24 @@ func (r *resolver) resolve(q *query, m module.Version) { // // If the resulting build list is the same as the one resulting from the last // call to updateBuildList, updateBuildList returns with changed=false. -func (r *resolver) updateBuildList(ctx context.Context, additions []module.Version) (changed bool) { +func (r *resolver) updateBuildList(loaderstate *modload.State, ctx context.Context, additions []module.Version) (changed bool) { defer base.ExitIfErrors() resolved := make([]module.Version, 0, len(r.resolvedVersion)) for mPath, rv := range r.resolvedVersion { - if !modload.MainModules.Contains(mPath) { + if !loaderstate.MainModules.Contains(mPath) { resolved = append(resolved, module.Version{Path: mPath, Version: rv.version}) } } - changed, err := modload.EditBuildList(ctx, additions, resolved) + changed, err := modload.EditBuildList(loaderstate, ctx, additions, resolved) if err != nil { if errors.Is(err, gover.ErrTooNew) { - toolchain.SwitchOrFatal(ctx, err) + toolchain.SwitchOrFatal(loaderstate, ctx, err) } - var constraint *modload.ConstraintError - if !errors.As(err, &constraint) { + constraint, ok := errors.AsType[*modload.ConstraintError](err) + if !ok { base.Fatal(err) } @@ -2035,9 +2046,9 @@ func (r *resolver) updateBuildList(ctx context.Context, additions []module.Versi return false } - mg, err := modload.LoadModGraph(ctx, "") + mg, err := modload.LoadModGraph(loaderstate, ctx, "") if err != nil { - toolchain.SwitchOrFatal(ctx, err) + toolchain.SwitchOrFatal(loaderstate, ctx, err) } r.buildList = mg.BuildList() @@ -2066,8 +2077,11 @@ func reqsFromGoMod(f *modfile.File) []module.Version { // does not exist at the requested version, either because the module does not // exist at all or because it does not include that specific version. func isNoSuchModuleVersion(err error) bool { - var noMatch *modload.NoMatchingVersionError - return errors.Is(err, os.ErrNotExist) || errors.As(err, &noMatch) + if errors.Is(err, os.ErrNotExist) { + return true + } + _, ok := errors.AsType[*modload.NoMatchingVersionError](err) + return ok } // isNoSuchPackageVersion reports whether err indicates that the requested @@ -2075,8 +2089,11 @@ func isNoSuchModuleVersion(err error) bool { // that could contain it exists at that version, or because every such module // that does exist does not actually contain the package. func isNoSuchPackageVersion(err error) bool { - var noPackage *modload.PackageNotInModuleError - return isNoSuchModuleVersion(err) || errors.As(err, &noPackage) + if isNoSuchModuleVersion(err) { + return true + } + _, ok := errors.AsType[*modload.PackageNotInModuleError](err) + return ok } // workspace represents the set of modules in a workspace. 
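The error-matching rewrites in this file follow one shape: errors.As with a pointer to a target variable becomes a generic errors.AsType lookup that returns the typed error and a bool, as in isNoSuchModuleVersion above. On a toolchain that does not yet ship errors.AsType, the semantics can be approximated with a local helper (asType below is a stand-in, not the standard-library function):

	package main

	import (
		"errors"
		"fmt"
	)

	// asType walks err's Unwrap chain and returns the first error of type T.
	func asType[T error](err error) (T, bool) {
		var target T
		ok := errors.As(err, &target)
		return target, ok
	}

	type noMatchingVersionError struct{ query string }

	func (e *noMatchingVersionError) Error() string {
		return "no matching versions for query " + e.query
	}

	func main() {
		err := fmt.Errorf("resolving: %w", &noMatchingVersionError{query: "latest"})

		// Old shape:
		//	var noMatch *noMatchingVersionError
		//	errors.As(err, &noMatch)
		//
		// New shape:
		if e, ok := asType[*noMatchingVersionError](err); ok {
			fmt.Println("matched:", e.query)
		}
	}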
diff --git a/src/cmd/go/internal/modget/query.go b/src/cmd/go/internal/modget/query.go index 05872d52ec4..3086dbc1ad6 100644 --- a/src/cmd/go/internal/modget/query.go +++ b/src/cmd/go/internal/modget/query.go @@ -139,7 +139,7 @@ func errSet(err error) pathSet { return pathSet{err: err} } // newQuery returns a new query parsed from the raw argument, // which must be either path or path@version. -func newQuery(raw string) (*query, error) { +func newQuery(loaderstate *modload.State, raw string) (*query, error) { pattern, rawVers, found := strings.Cut(raw, "@") if found && (strings.Contains(rawVers, "@") || rawVers == "") { return nil, fmt.Errorf("invalid module version syntax %q", raw) @@ -167,14 +167,14 @@ func newQuery(raw string) (*query, error) { q.matchWildcard = pkgpattern.MatchPattern(q.pattern) q.canMatchWildcardInModule = pkgpattern.TreeCanMatchPattern(q.pattern) } - if err := q.validate(); err != nil { + if err := q.validate(loaderstate); err != nil { return q, err } return q, nil } // validate reports a non-nil error if q is not sensible and well-formed. -func (q *query) validate() error { +func (q *query) validate(loaderstate *modload.State) error { if q.patternIsLocal { if q.rawVersion != "" { return fmt.Errorf("can't request explicit version %q of path %q in main module", q.rawVersion, q.pattern) @@ -184,15 +184,15 @@ func (q *query) validate() error { if q.pattern == "all" { // If there is no main module, "all" is not meaningful. - if !modload.HasModRoot() { - return fmt.Errorf(`cannot match "all": %v`, modload.ErrNoModRoot) + if !loaderstate.HasModRoot() { + return fmt.Errorf(`cannot match "all": %v`, modload.NewNoMainModulesError(loaderstate)) } if !versionOkForMainModule(q.version) { // TODO(bcmills): "all@none" seems like a totally reasonable way to // request that we remove all module requirements, leaving only the main // module and standard library. Perhaps we should implement that someday. return &modload.QueryUpgradesAllError{ - MainModules: modload.MainModules.Versions(), + MainModules: loaderstate.MainModules.Versions(), Query: q.version, } } @@ -283,7 +283,7 @@ func reportError(q *query, err error) { // If err already mentions all of the relevant parts of q, just log err to // reduce stutter. Otherwise, log both q and err. // - // TODO(bcmills): Use errors.As to unpack these errors instead of parsing + // TODO(bcmills): Use errors.AsType to unpack these errors instead of parsing // strings with regular expressions. if !utf8.ValidString(q.pattern) || !utf8.ValidString(q.version) { diff --git a/src/cmd/go/internal/modindex/build.go b/src/cmd/go/internal/modindex/build.go index 0fa78afe2c0..053e04dfe59 100644 --- a/src/cmd/go/internal/modindex/build.go +++ b/src/cmd/go/internal/modindex/build.go @@ -296,9 +296,9 @@ func cleanDecls(m map[string][]token.Position) ([]string, map[string][]token.Pos } var ( - bSlashSlash = []byte(slashSlash) - bStarSlash = []byte(starSlash) - bSlashStar = []byte(slashStar) + bSlashSlash = slashSlash + bStarSlash = starSlash + bSlashStar = slashStar bPlusBuild = []byte("+build") goBuildComment = []byte("//go:build") diff --git a/src/cmd/go/internal/modindex/build_read.go b/src/cmd/go/internal/modindex/build_read.go index f05b215c19e..f2e48f36cdf 100644 --- a/src/cmd/go/internal/modindex/build_read.go +++ b/src/cmd/go/internal/modindex/build_read.go @@ -92,37 +92,24 @@ func (r *importReader) readByte() byte { return c } -// readByteNoBuf is like readByte but doesn't buffer the byte. -// It exhausts r.buf before reading from r.b. 
-func (r *importReader) readByteNoBuf() byte { - var c byte - var err error - if len(r.buf) > 0 { - c = r.buf[0] - r.buf = r.buf[1:] - } else { - c, err = r.b.ReadByte() - if err == nil && c == 0 { - err = errNUL +// readRest reads the entire rest of the file into r.buf. +func (r *importReader) readRest() { + for { + if len(r.buf) == cap(r.buf) { + // Grow the buffer + r.buf = append(r.buf, 0)[:len(r.buf)] + } + n, err := r.b.Read(r.buf[len(r.buf):cap(r.buf)]) + r.buf = r.buf[:len(r.buf)+n] + if err != nil { + if err == io.EOF { + r.eof = true + } else if r.err == nil { + r.err = err + } + break } } - - if err != nil { - if err == io.EOF { - r.eof = true - } else if r.err == nil { - r.err = err - } - return 0 - } - r.pos.Offset++ - if c == '\n' { - r.pos.Line++ - r.pos.Column = 1 - } else { - r.pos.Column++ - } - return c } // peekByte returns the next byte from the input reader but does not advance beyond it. @@ -185,130 +172,6 @@ func (r *importReader) nextByte(skipSpace bool) byte { return c } -var goEmbed = []byte("go:embed") - -// findEmbed advances the input reader to the next //go:embed comment. -// It reports whether it found a comment. -// (Otherwise it found an error or EOF.) -func (r *importReader) findEmbed(first bool) bool { - // The import block scan stopped after a non-space character, - // so the reader is not at the start of a line on the first call. - // After that, each //go:embed extraction leaves the reader - // at the end of a line. - startLine := !first - var c byte - for r.err == nil && !r.eof { - c = r.readByteNoBuf() - Reswitch: - switch c { - default: - startLine = false - - case '\n': - startLine = true - - case ' ', '\t': - // leave startLine alone - - case '"': - startLine = false - for r.err == nil { - if r.eof { - r.syntaxError() - } - c = r.readByteNoBuf() - if c == '\\' { - r.readByteNoBuf() - if r.err != nil { - r.syntaxError() - return false - } - continue - } - if c == '"' { - c = r.readByteNoBuf() - goto Reswitch - } - } - goto Reswitch - - case '`': - startLine = false - for r.err == nil { - if r.eof { - r.syntaxError() - } - c = r.readByteNoBuf() - if c == '`' { - c = r.readByteNoBuf() - goto Reswitch - } - } - - case '\'': - startLine = false - for r.err == nil { - if r.eof { - r.syntaxError() - } - c = r.readByteNoBuf() - if c == '\\' { - r.readByteNoBuf() - if r.err != nil { - r.syntaxError() - return false - } - continue - } - if c == '\'' { - c = r.readByteNoBuf() - goto Reswitch - } - } - - case '/': - c = r.readByteNoBuf() - switch c { - default: - startLine = false - goto Reswitch - - case '*': - var c1 byte - for (c != '*' || c1 != '/') && r.err == nil { - if r.eof { - r.syntaxError() - } - c, c1 = c1, r.readByteNoBuf() - } - startLine = false - - case '/': - if startLine { - // Try to read this as a //go:embed comment. - for i := range goEmbed { - c = r.readByteNoBuf() - if c != goEmbed[i] { - goto SkipSlashSlash - } - } - c = r.readByteNoBuf() - if c == ' ' || c == '\t' { - // Found one! - return true - } - } - SkipSlashSlash: - for c != '\n' && r.err == nil && !r.eof { - c = r.readByteNoBuf() - } - startLine = true - } - } - } - return false -} - // readKeyword reads the given keyword from the input. // If the keyword is not present, readKeyword records a syntax error. func (r *importReader) readKeyword(kw string) { @@ -429,9 +292,7 @@ func readGoInfo(f io.Reader, info *fileInfo) error { // we are sure we don't change the errors that go/parser returns. 
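The growth step in readRest above is a compact idiom: appending a single byte forces the runtime to grow the backing array, and re-slicing back to the original length keeps len unchanged while cap increases, leaving buf[len(buf):cap(buf)] free to Read into. A tiny demonstration:

	package main

	import "fmt"

	func main() {
		buf := make([]byte, 3, 3) // len 3, cap 3
		fmt.Println(len(buf), cap(buf))

		// Grow capacity only: append forces reallocation, and the
		// re-slice restores the old length.
		buf = append(buf, 0)[:len(buf)]
		fmt.Println(len(buf), cap(buf)) // len 3, cap now > 3

		// The spare capacity is the read destination used by readRest.
		spare := buf[len(buf):cap(buf)]
		fmt.Println(len(spare))
	}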
if r.err == errSyntax { r.err = nil - for r.err == nil && !r.eof { - r.readByte() - } + r.readRest() info.header = r.buf } if r.err != nil { @@ -504,23 +365,23 @@ func readGoInfo(f io.Reader, info *fileInfo) error { // (near the package statement or imports), the compiler // will reject them. They can be (and have already been) ignored. if hasEmbed { - var line []byte - for first := true; r.findEmbed(first); first = false { - line = line[:0] - pos := r.pos - for { - c := r.readByteNoBuf() - if c == '\n' || r.err != nil || r.eof { - break - } - line = append(line, c) + r.readRest() + fset := token.NewFileSet() + file := fset.AddFile(r.pos.Filename, -1, len(r.buf)) + var sc scanner.Scanner + sc.Init(file, r.buf, nil, scanner.ScanComments) + for { + pos, tok, lit := sc.Scan() + if tok == token.EOF { + break } - // Add args if line is well-formed. - // Ignore badly-formed lines - the compiler will report them when it finds them, - // and we can pretend they are not there to help go list succeed with what it knows. - embs, err := parseGoEmbed(string(line), pos) - if err == nil { - info.embeds = append(info.embeds, embs...) + if tok == token.COMMENT && strings.HasPrefix(lit, "//go:embed") { + // Ignore badly-formed lines - the compiler will report them when it finds them, + // and we can pretend they are not there to help go list succeed with what it knows. + embs, err := parseGoEmbed(fset, pos, lit) + if err == nil { + info.embeds = append(info.embeds, embs...) + } } } } @@ -542,75 +403,21 @@ func isValidImport(s string) bool { return s != "" } -// parseGoEmbed parses the text following "//go:embed" to extract the glob patterns. +// parseGoEmbed parses a "//go:embed" to extract the glob patterns. // It accepts unquoted space-separated patterns as well as double-quoted and back-quoted Go strings. -// This is based on a similar function in cmd/compile/internal/gc/noder.go; -// this version calculates position information as well. -func parseGoEmbed(args string, pos token.Position) ([]fileEmbed, error) { - trimBytes := func(n int) { - pos.Offset += n - pos.Column += utf8.RuneCountInString(args[:n]) - args = args[n:] +// This must match the behavior of cmd/compile/internal/noder.go. 
+func parseGoEmbed(fset *token.FileSet, pos token.Pos, comment string) ([]fileEmbed, error) { + dir, ok := ast.ParseDirective(pos, comment) + if !ok || dir.Tool != "go" || dir.Name != "embed" { + return nil, nil } - trimSpace := func() { - trim := strings.TrimLeftFunc(args, unicode.IsSpace) - trimBytes(len(args) - len(trim)) + args, err := dir.ParseArgs() + if err != nil { + return nil, err } - var list []fileEmbed - for trimSpace(); args != ""; trimSpace() { - var path string - pathPos := pos - Switch: - switch args[0] { - default: - i := len(args) - for j, c := range args { - if unicode.IsSpace(c) { - i = j - break - } - } - path = args[:i] - trimBytes(i) - - case '`': - var ok bool - path, _, ok = strings.Cut(args[1:], "`") - if !ok { - return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args) - } - trimBytes(1 + len(path) + 1) - - case '"': - i := 1 - for ; i < len(args); i++ { - if args[i] == '\\' { - i++ - continue - } - if args[i] == '"' { - q, err := strconv.Unquote(args[:i+1]) - if err != nil { - return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args[:i+1]) - } - path = q - trimBytes(i + 1) - break Switch - } - } - if i >= len(args) { - return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args) - } - } - - if args != "" { - r, _ := utf8.DecodeRuneInString(args) - if !unicode.IsSpace(r) { - return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args) - } - } - list = append(list, fileEmbed{path, pathPos}) + for _, arg := range args { + list = append(list, fileEmbed{arg.Arg, fset.Position(arg.Pos)}) } return list, nil } diff --git a/src/cmd/go/internal/modload/build.go b/src/cmd/go/internal/modload/build.go index 6e30afd5247..4f334a47203 100644 --- a/src/cmd/go/internal/modload/build.go +++ b/src/cmd/go/internal/modload/build.go @@ -51,8 +51,8 @@ func findStandardImportPath(path string) string { // a given package. If modules are not enabled or if the package is in the // standard library or if the package was not successfully loaded with // LoadPackages or ImportFromFiles, nil is returned. -func PackageModuleInfo(ctx context.Context, pkgpath string) *modinfo.ModulePublic { - if isStandardImportPath(pkgpath) || !Enabled() { +func PackageModuleInfo(loaderstate *State, ctx context.Context, pkgpath string) *modinfo.ModulePublic { + if isStandardImportPath(pkgpath) || !loaderstate.Enabled() { return nil } m, ok := findModule(loaded, pkgpath) @@ -60,50 +60,50 @@ func PackageModuleInfo(ctx context.Context, pkgpath string) *modinfo.ModulePubli return nil } - rs := LoadModFile(ctx) - return moduleInfo(ctx, rs, m, 0, nil) + rs := LoadModFile(loaderstate, ctx) + return moduleInfo(loaderstate, ctx, rs, m, 0, nil) } // PackageModRoot returns the module root directory for the module that provides // a given package. If modules are not enabled or if the package is in the // standard library or if the package was not successfully loaded with // LoadPackages or ImportFromFiles, the empty string is returned. 
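The parseGoEmbed rewrite above delegates tokenizing and unquoting to the new go/ast directive API used here (ast.ParseDirective and Directive.ParseArgs). Assuming that API, which is not in any released Go version, standalone usage would look roughly like this:

package main

import (
	"fmt"
	"go/ast"
	"go/token"
)

func main() {
	// Assumes the ast.Directive API added by this patch series.
	comment := "//go:embed images/*.png \"two words.txt\" `raw.txt`"

	dir, ok := ast.ParseDirective(token.NoPos, comment)
	if !ok || dir.Tool != "go" || dir.Name != "embed" {
		fmt.Println("not a //go:embed directive")
		return
	}
	args, err := dir.ParseArgs() // unquotes double- and back-quoted arguments
	if err != nil {
		fmt.Println("malformed arguments:", err)
		return
	}
	for _, arg := range args {
		fmt.Printf("pattern %q at offset %d\n", arg.Arg, arg.Pos)
	}
}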
-func PackageModRoot(ctx context.Context, pkgpath string) string { - if isStandardImportPath(pkgpath) || !Enabled() || cfg.BuildMod == "vendor" { +func PackageModRoot(loaderstate *State, ctx context.Context, pkgpath string) string { + if isStandardImportPath(pkgpath) || !loaderstate.Enabled() || cfg.BuildMod == "vendor" { return "" } m, ok := findModule(loaded, pkgpath) if !ok { return "" } - root, _, err := fetch(ctx, m) + root, _, err := fetch(loaderstate, ctx, m) if err != nil { return "" } return root } -func ModuleInfo(ctx context.Context, path string) *modinfo.ModulePublic { - if !Enabled() { +func ModuleInfo(loaderstate *State, ctx context.Context, path string) *modinfo.ModulePublic { + if !loaderstate.Enabled() { return nil } if path, vers, found := strings.Cut(path, "@"); found { m := module.Version{Path: path, Version: vers} - return moduleInfo(ctx, nil, m, 0, nil) + return moduleInfo(loaderstate, ctx, nil, m, 0, nil) } - rs := LoadModFile(ctx) + rs := LoadModFile(loaderstate, ctx) var ( v string ok bool ) if rs.pruning == pruned { - v, ok = rs.rootSelected(path) + v, ok = rs.rootSelected(loaderstate, path) } if !ok { - mg, err := rs.Graph(ctx) + mg, err := rs.Graph(loaderstate, ctx) if err != nil { base.Fatal(err) } @@ -119,21 +119,20 @@ func ModuleInfo(ctx context.Context, path string) *modinfo.ModulePublic { } } - return moduleInfo(ctx, rs, module.Version{Path: path, Version: v}, 0, nil) + return moduleInfo(loaderstate, ctx, rs, module.Version{Path: path, Version: v}, 0, nil) } // addUpdate fills in m.Update if an updated version is available. -func addUpdate(ctx context.Context, m *modinfo.ModulePublic) { +func addUpdate(loaderstate *State, ctx context.Context, m *modinfo.ModulePublic) { if m.Version == "" { return } - info, err := Query(ctx, m.Path, "upgrade", m.Version, CheckAllowed) - var noVersionErr *NoMatchingVersionError - if errors.Is(err, ErrDisallowed) || + info, err := Query(loaderstate, ctx, m.Path, "upgrade", m.Version, loaderstate.CheckAllowed) + if _, ok := errors.AsType[*NoMatchingVersionError](err); ok || errors.Is(err, fs.ErrNotExist) || - errors.As(err, &noVersionErr) { - // Ignore "not found" and "no matching version" errors. + errors.Is(err, ErrDisallowed) { + // Ignore "no matching version" and "not found" errors. // This means the proxy has no matching version or no versions at all. // // Ignore "disallowed" errors. This means the current version is @@ -190,6 +189,12 @@ func mergeOrigin(m1, m2 *codehost.Origin) *codehost.Origin { merged.TagSum = m2.TagSum merged.TagPrefix = m2.TagPrefix } + if m2.RepoSum != "" { + if m1.RepoSum != "" && m1.RepoSum != m2.RepoSum { + return nil + } + merged.RepoSum = m2.RepoSum + } if m2.Ref != "" { if m1.Ref != "" && m1.Ref != m2.Ref { return nil @@ -213,16 +218,16 @@ func mergeOrigin(m1, m2 *codehost.Origin) *codehost.Origin { // addVersions fills in m.Versions with the list of known versions. // Excluded versions will be omitted. If listRetracted is false, retracted // versions will also be omitted. -func addVersions(ctx context.Context, m *modinfo.ModulePublic, listRetracted bool) { +func addVersions(loaderstate *State, ctx context.Context, m *modinfo.ModulePublic, listRetracted bool) { // TODO(bcmills): Would it make sense to check for reuse here too? // Perhaps that doesn't buy us much, though: we would always have to fetch // all of the version tags to list the available versions anyway. 
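A recurring mechanical change in this file replaces the errors.As target-variable pattern with the generic errors.AsType seen above. Both styles side by side, with a made-up error type; note that errors.AsType is itself assumed from this patch's Go tree and is not available in released Go:

package main

import (
	"errors"
	"fmt"
)

type NoMatchingVersionError struct{ Query string }

func (e *NoMatchingVersionError) Error() string {
	return "no matching version for " + e.Query
}

func main() {
	err := fmt.Errorf("query failed: %w", &NoMatchingVersionError{Query: "latest"})

	// Old style: declare a typed variable just to use it as an errors.As target.
	var nvErr *NoMatchingVersionError
	if errors.As(err, &nvErr) {
		fmt.Println("errors.As:", nvErr.Query)
	}

	// New style used in this change: the type parameter replaces the
	// target variable, and the match is returned directly.
	if e, ok := errors.AsType[*NoMatchingVersionError](err); ok {
		fmt.Println("errors.AsType:", e.Query)
	}
}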
- allowed := CheckAllowed + allowed := loaderstate.CheckAllowed if listRetracted { - allowed = CheckExclusions + allowed = loaderstate.CheckExclusions } - v, origin, err := versions(ctx, m.Path, allowed) + v, origin, err := versions(loaderstate, ctx, m.Path, allowed) if err != nil && m.Error == nil { m.Error = &modinfo.ModuleError{Err: err.Error()} } @@ -232,16 +237,16 @@ func addVersions(ctx context.Context, m *modinfo.ModulePublic, listRetracted boo // addRetraction fills in m.Retracted if the module was retracted by its author. // m.Error is set if there's an error loading retraction information. -func addRetraction(ctx context.Context, m *modinfo.ModulePublic) { +func addRetraction(loaderstate *State, ctx context.Context, m *modinfo.ModulePublic) { if m.Version == "" { return } - err := CheckRetractions(ctx, module.Version{Path: m.Path, Version: m.Version}) - var noVersionErr *NoMatchingVersionError - var retractErr *ModuleRetractedError - if err == nil || errors.Is(err, fs.ErrNotExist) || errors.As(err, &noVersionErr) { - // Ignore "not found" and "no matching version" errors. + err := loaderstate.CheckRetractions(ctx, module.Version{Path: m.Path, Version: m.Version}) + if err == nil { + return + } else if _, ok := errors.AsType[*NoMatchingVersionError](err); ok || errors.Is(err, fs.ErrNotExist) { + // Ignore "no matching version" and "not found" errors. // This means the proxy has no matching version or no versions at all. // // We should report other errors though. An attacker that controls the @@ -250,7 +255,7 @@ func addRetraction(ctx context.Context, m *modinfo.ModulePublic) { // hide versions, since the "list" and "latest" endpoints are not // authenticated. return - } else if errors.As(err, &retractErr) { + } else if retractErr, ok := errors.AsType[*ModuleRetractedError](err); ok { if len(retractErr.Rationale) == 0 { m.Retracted = []string{"retracted by module author"} } else { @@ -263,11 +268,10 @@ func addRetraction(ctx context.Context, m *modinfo.ModulePublic) { // addDeprecation fills in m.Deprecated if the module was deprecated by its // author. m.Error is set if there's an error loading deprecation information. -func addDeprecation(ctx context.Context, m *modinfo.ModulePublic) { - deprecation, err := CheckDeprecation(ctx, module.Version{Path: m.Path, Version: m.Version}) - var noVersionErr *NoMatchingVersionError - if errors.Is(err, fs.ErrNotExist) || errors.As(err, &noVersionErr) { - // Ignore "not found" and "no matching version" errors. +func addDeprecation(loaderstate *State, ctx context.Context, m *modinfo.ModulePublic) { + deprecation, err := CheckDeprecation(loaderstate, ctx, module.Version{Path: m.Path, Version: m.Version}) + if _, ok := errors.AsType[*NoMatchingVersionError](err); ok || errors.Is(err, fs.ErrNotExist) { + // Ignore "no matching version" and "not found" errors. // This means the proxy has no matching version or no versions at all. // // We should report other errors though. An attacker that controls the @@ -289,8 +293,8 @@ func addDeprecation(ctx context.Context, m *modinfo.ModulePublic) { // moduleInfo returns information about module m, loaded from the requirements // in rs (which may be nil to indicate that m was not loaded from a requirement // graph). 
-func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode ListMode, reuse map[module.Version]*modinfo.ModulePublic) *modinfo.ModulePublic { - if m.Version == "" && MainModules.Contains(m.Path) { +func moduleInfo(loaderstate *State, ctx context.Context, rs *Requirements, m module.Version, mode ListMode, reuse map[module.Version]*modinfo.ModulePublic) *modinfo.ModulePublic { + if m.Version == "" && loaderstate.MainModules.Contains(m.Path) { info := &modinfo.ModulePublic{ Path: m.Path, Version: m.Version, @@ -301,7 +305,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li } else { panic("internal error: GoVersion not set for main module") } - if modRoot := MainModules.ModRoot(m); modRoot != "" { + if modRoot := loaderstate.MainModules.ModRoot(m); modRoot != "" { info.Dir = modRoot info.GoMod = modFilePath(modRoot) } @@ -324,7 +328,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li } checksumOk := func(suffix string) bool { - return rs == nil || m.Version == "" || !mustHaveSums() || + return rs == nil || m.Version == "" || !mustHaveSums(loaderstate) || modfetch.HaveSum(module.Version{Path: m.Path, Version: m.Version + suffix}) } @@ -332,7 +336,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li if m.Version != "" { if old := reuse[mod]; old != nil { - if err := checkReuse(ctx, mod, old.Origin); err == nil { + if err := checkReuse(loaderstate, ctx, mod, old.Origin); err == nil { *m = *old m.Query = "" m.Dir = "" @@ -340,7 +344,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li } } - if q, err := Query(ctx, m.Path, m.Version, "", nil); err != nil { + if q, err := Query(loaderstate, ctx, m.Path, m.Version, "", nil); err != nil { m.Error = &modinfo.ModuleError{Err: err.Error()} } else { m.Version = q.Version @@ -351,7 +355,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li if m.GoVersion == "" && checksumOk("/go.mod") { // Load the go.mod file to determine the Go version, since it hasn't // already been populated from rawGoVersion. 
- if summary, err := rawGoModSummary(mod); err == nil && summary.goVersion != "" { + if summary, err := rawGoModSummary(loaderstate, mod); err == nil && summary.goVersion != "" { m.GoVersion = summary.goVersion } } @@ -379,7 +383,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li } if mode&ListRetracted != 0 { - addRetraction(ctx, m) + addRetraction(loaderstate, ctx, m) } } } @@ -391,7 +395,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li return info } - r := Replacement(m) + r := Replacement(loaderstate, m) if r.Path == "" { if cfg.BuildMod == "vendor" { // It's tempting to fill in the "Dir" field to point within the vendor @@ -420,7 +424,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li if filepath.IsAbs(r.Path) { info.Replace.Dir = r.Path } else { - info.Replace.Dir = filepath.Join(replaceRelativeTo(), r.Path) + info.Replace.Dir = filepath.Join(replaceRelativeTo(loaderstate), r.Path) } info.Replace.GoMod = filepath.Join(info.Replace.Dir, "go.mod") } diff --git a/src/cmd/go/internal/modload/buildlist.go b/src/cmd/go/internal/modload/buildlist.go index 2ba04f707b5..37c2a6c759f 100644 --- a/src/cmd/go/internal/modload/buildlist.go +++ b/src/cmd/go/internal/modload/buildlist.go @@ -85,16 +85,6 @@ type cachedGraph struct { err error // If err is non-nil, mg may be incomplete (but must still be non-nil). } -// requirements is the requirement graph for the main module. -// -// It is always non-nil if the main module's go.mod file has been loaded. -// -// This variable should only be read from the loadModFile function, and should -// only be written in the loadModFile and commitRequirements functions. -// All other functions that need or produce a *Requirements should -// accept and/or return an explicit parameter. -var requirements *Requirements - func mustHaveGoRoot(roots []module.Version) { for _, m := range roots { if m.Path == "go" { @@ -114,21 +104,21 @@ func mustHaveGoRoot(roots []module.Version) { // // If vendoring is in effect, the caller must invoke initVendor on the returned // *Requirements before any other method. -func newRequirements(pruning modPruning, rootModules []module.Version, direct map[string]bool) *Requirements { +func newRequirements(loaderstate *State, pruning modPruning, rootModules []module.Version, direct map[string]bool) *Requirements { mustHaveGoRoot(rootModules) if pruning != workspace { - if workFilePath != "" { + if loaderstate.workFilePath != "" { panic("in workspace mode, but pruning is not workspace in newRequirements") } } if pruning != workspace { - if workFilePath != "" { + if loaderstate.workFilePath != "" { panic("in workspace mode, but pruning is not workspace in newRequirements") } for i, m := range rootModules { - if m.Version == "" && MainModules.Contains(m.Path) { + if m.Version == "" && loaderstate.MainModules.Contains(m.Path) { panic(fmt.Sprintf("newRequirements called with untrimmed build list: rootModules[%v] is a main module", i)) } if m.Path == "" || m.Version == "" { @@ -172,10 +162,10 @@ func (rs *Requirements) String() string { // initVendor initializes rs.graph from the given list of vendored module // dependencies, overriding the graph that would normally be loaded from module // requirements. 
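The deleted package-level requirements variable is representative of the whole modload change: state that used to live in globals now travels through an explicit *State (loaderstate) parameter. A toy sketch of the shape of that refactor, with every name hypothetical:

package main

import "fmt"

// Before: functions consulted a package-level variable, e.g.
//
//	var requirements *Requirements
//	func GoVersion() string { return requirements.goVersion }
//
// After: the same state is carried explicitly, so independent loaders
// can coexist and the data flow is visible in every signature.

type Requirements struct{ goVersion string }

type State struct{ requirements *Requirements }

func (s *State) GoVersion() string { return s.requirements.goVersion }

func main() {
	a := &State{requirements: &Requirements{goVersion: "1.21"}}
	b := &State{requirements: &Requirements{goVersion: "1.22"}}
	fmt.Println(a.GoVersion(), b.GoVersion()) // two isolated loader states
}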
-func (rs *Requirements) initVendor(vendorList []module.Version) { +func (rs *Requirements) initVendor(loaderstate *State, vendorList []module.Version) { rs.graphOnce.Do(func() { - roots := MainModules.Versions() - if inWorkspaceMode() { + roots := loaderstate.MainModules.Versions() + if loaderstate.inWorkspaceMode() { // Use rs.rootModules to pull in the go and toolchain roots // from the go.work file and preserve the invariant that all // of rs.rootModules are in mg.g. @@ -186,7 +176,7 @@ func (rs *Requirements) initVendor(vendorList []module.Version) { } if rs.pruning == pruned { - mainModule := MainModules.mustGetSingleMainModule() + mainModule := loaderstate.MainModules.mustGetSingleMainModule(loaderstate) // The roots of a single pruned module should already include every module in the // vendor list, because the vendored modules are the same as those needed // for graph pruning. @@ -194,7 +184,7 @@ func (rs *Requirements) initVendor(vendorList []module.Version) { // Just to be sure, we'll double-check that here. inconsistent := false for _, m := range vendorList { - if v, ok := rs.rootSelected(m.Path); !ok || v != m.Version { + if v, ok := rs.rootSelected(loaderstate, m.Path); !ok || v != m.Version { base.Errorf("go: vendored module %v should be required explicitly in go.mod", m) inconsistent = true } @@ -218,15 +208,15 @@ func (rs *Requirements) initVendor(vendorList []module.Version) { // graph, but still distinguishes between direct and indirect // dependencies. vendorMod := module.Version{Path: "vendor/modules.txt", Version: ""} - if inWorkspaceMode() { - for _, m := range MainModules.Versions() { - reqs, _ := rootsFromModFile(m, MainModules.ModFile(m), omitToolchainRoot) + if loaderstate.inWorkspaceMode() { + for _, m := range loaderstate.MainModules.Versions() { + reqs, _ := rootsFromModFile(loaderstate, m, loaderstate.MainModules.ModFile(m), omitToolchainRoot) mg.g.Require(m, append(reqs, vendorMod)) } mg.g.Require(vendorMod, vendorList) } else { - mainModule := MainModules.mustGetSingleMainModule() + mainModule := loaderstate.MainModules.mustGetSingleMainModule(loaderstate) mg.g.Require(mainModule, append(rs.rootModules, vendorMod)) mg.g.Require(vendorMod, vendorList) } @@ -237,8 +227,8 @@ func (rs *Requirements) initVendor(vendorList []module.Version) { } // GoVersion returns the Go language version for the Requirements. -func (rs *Requirements) GoVersion() string { - v, _ := rs.rootSelected("go") +func (rs *Requirements) GoVersion(loaderstate *State) string { + v, _ := rs.rootSelected(loaderstate, "go") if v == "" { panic("internal error: missing go version in modload.Requirements") } @@ -248,8 +238,8 @@ func (rs *Requirements) GoVersion() string { // rootSelected returns the version of the root dependency with the given module // path, or the zero module.Version and ok=false if the module is not a root // dependency. -func (rs *Requirements) rootSelected(path string) (version string, ok bool) { - if MainModules.Contains(path) { +func (rs *Requirements) rootSelected(loaderstate *State, path string) (version string, ok bool) { + if loaderstate.MainModules.Contains(path) { return "", true } if v, ok := rs.maxRootVersion[path]; ok { @@ -262,9 +252,9 @@ func (rs *Requirements) rootSelected(path string) (version string, ok bool) { // of the same module or a requirement on any version of the main module. // Redundant requirements should be pruned, but they may influence version // selection. 
-func (rs *Requirements) hasRedundantRoot() bool { +func (rs *Requirements) hasRedundantRoot(loaderstate *State) bool { for i, m := range rs.rootModules { - if MainModules.Contains(m.Path) || (i > 0 && m.Path == rs.rootModules[i-1].Path) { + if loaderstate.MainModules.Contains(m.Path) || (i > 0 && m.Path == rs.rootModules[i-1].Path) { return true } } @@ -279,9 +269,9 @@ func (rs *Requirements) hasRedundantRoot() bool { // // If the requirements of any relevant module fail to load, Graph also // returns a non-nil error of type *mvs.BuildListError. -func (rs *Requirements) Graph(ctx context.Context) (*ModuleGraph, error) { +func (rs *Requirements) Graph(loaderstate *State, ctx context.Context) (*ModuleGraph, error) { rs.graphOnce.Do(func() { - mg, mgErr := readModGraph(ctx, rs.pruning, rs.rootModules, nil) + mg, mgErr := readModGraph(loaderstate, ctx, rs.pruning, rs.rootModules, nil) rs.graph.Store(&cachedGraph{mg, mgErr}) }) cached := rs.graph.Load() @@ -317,7 +307,7 @@ var readModGraphDebugOnce sync.Once // // Unlike LoadModGraph, readModGraph does not attempt to diagnose or update // inconsistent roots. -func readModGraph(ctx context.Context, pruning modPruning, roots []module.Version, unprune map[module.Version]bool) (*ModuleGraph, error) { +func readModGraph(loaderstate *State, ctx context.Context, pruning modPruning, roots []module.Version, unprune map[module.Version]bool) (*ModuleGraph, error) { mustHaveGoRoot(roots) if pruning == pruned { // Enable diagnostics for lazy module loading @@ -343,10 +333,10 @@ func readModGraph(ctx context.Context, pruning modPruning, roots []module.Versio } var graphRoots []module.Version - if inWorkspaceMode() { + if loaderstate.inWorkspaceMode() { graphRoots = roots } else { - graphRoots = MainModules.Versions() + graphRoots = loaderstate.MainModules.Versions() } var ( mu sync.Mutex // guards mg.g and hasError during loading @@ -357,10 +347,10 @@ func readModGraph(ctx context.Context, pruning modPruning, roots []module.Versio ) if pruning != workspace { - if inWorkspaceMode() { + if loaderstate.inWorkspaceMode() { panic("pruning is not workspace in workspace mode") } - mg.g.Require(MainModules.mustGetSingleMainModule(), roots) + mg.g.Require(loaderstate.MainModules.mustGetSingleMainModule(loaderstate), roots) } type dedupKey struct { @@ -377,7 +367,7 @@ func readModGraph(ctx context.Context, pruning modPruning, roots []module.Versio // m's go.mod file indicates that it supports graph pruning. 
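Requirements.Graph above computes the module graph at most once and caches the result together with its error. That pattern, a sync.Once guarding a store into an atomic pointer holding a value/error pair, can be shown in isolation; the types below are illustrative stand-ins, not the modload ones:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type graph struct{ nodes int }

type cachedGraph struct {
	g   *graph
	err error // if non-nil, g may be incomplete (but is still non-nil)
}

type requirements struct {
	once  sync.Once
	cache atomic.Pointer[cachedGraph]
}

// Graph builds the graph on first use; later calls return the cached
// result, including any cached error, without recomputing.
func (rs *requirements) Graph() (*graph, error) {
	rs.once.Do(func() {
		g, err := &graph{nodes: 42}, error(nil) // stand-in for readModGraph
		rs.cache.Store(&cachedGraph{g, err})
	})
	c := rs.cache.Load()
	return c.g, c.err
}

func main() {
	var rs requirements
	g, err := rs.Graph()
	fmt.Println(g.nodes, err)
}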
loadOne := func(m module.Version) (*modFileSummary, error) { return mg.loadCache.Do(m, func() (*modFileSummary, error) { - summary, err := goModSummary(m) + summary, err := goModSummary(loaderstate, m) mu.Lock() if err == nil { @@ -537,12 +527,12 @@ func (mg *ModuleGraph) findError() error { return nil } -func (mg *ModuleGraph) allRootsSelected() bool { +func (mg *ModuleGraph) allRootsSelected(loaderstate *State) bool { var roots []module.Version - if inWorkspaceMode() { - roots = MainModules.Versions() + if loaderstate.inWorkspaceMode() { + roots = loaderstate.MainModules.Versions() } else { - roots, _ = mg.g.RequiredBy(MainModules.mustGetSingleMainModule()) + roots, _ = mg.g.RequiredBy(loaderstate.MainModules.mustGetSingleMainModule(loaderstate)) } for _, m := range roots { if mg.Selected(m.Path) != m.Version { @@ -562,14 +552,14 @@ func (mg *ModuleGraph) allRootsSelected() bool { // Modules are loaded automatically (and lazily) in LoadPackages: // LoadModGraph need only be called if LoadPackages is not, // typically in commands that care about modules but no particular package. -func LoadModGraph(ctx context.Context, goVersion string) (*ModuleGraph, error) { - rs, err := loadModFile(ctx, nil) +func LoadModGraph(loaderstate *State, ctx context.Context, goVersion string) (*ModuleGraph, error) { + rs, err := loadModFile(loaderstate, ctx, nil) if err != nil { return nil, err } if goVersion != "" { - v, _ := rs.rootSelected("go") + v, _ := rs.rootSelected(loaderstate, "go") if gover.Compare(v, gover.GoStrictVersion) >= 0 && gover.Compare(goVersion, v) < 0 { return nil, fmt.Errorf("requested Go version %s cannot load module graph (requires Go >= %s)", goVersion, v) } @@ -579,17 +569,17 @@ func LoadModGraph(ctx context.Context, goVersion string) (*ModuleGraph, error) { // Use newRequirements instead of convertDepth because convertDepth // also updates roots; here, we want to report the unmodified roots // even though they may seem inconsistent. - rs = newRequirements(unpruned, rs.rootModules, rs.direct) + rs = newRequirements(loaderstate, unpruned, rs.rootModules, rs.direct) } - return rs.Graph(ctx) + return rs.Graph(loaderstate, ctx) } - rs, mg, err := expandGraph(ctx, rs) + rs, mg, err := expandGraph(loaderstate, ctx, rs) if err != nil { return nil, err } - requirements = rs + loaderstate.requirements = rs return mg, nil } @@ -604,22 +594,22 @@ func LoadModGraph(ctx context.Context, goVersion string) (*ModuleGraph, error) { // from those roots and any error encountered while loading that graph. // expandGraph returns non-nil requirements and a non-nil graph regardless of // errors. On error, the roots might not be updated to be consistent. -func expandGraph(ctx context.Context, rs *Requirements) (*Requirements, *ModuleGraph, error) { - mg, mgErr := rs.Graph(ctx) +func expandGraph(loaderstate *State, ctx context.Context, rs *Requirements) (*Requirements, *ModuleGraph, error) { + mg, mgErr := rs.Graph(loaderstate, ctx) if mgErr != nil { // Without the graph, we can't update the roots: we don't know which // versions of transitive dependencies would be selected. return rs, mg, mgErr } - if !mg.allRootsSelected() { + if !mg.allRootsSelected(loaderstate) { // The roots of rs are not consistent with the rest of the graph. Update // them. In an unpruned module this is a no-op for the build list as a whole — // it just promotes what were previously transitive requirements to be // roots — but in a pruned module it may pull in previously-irrelevant // transitive dependencies. 
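The loadOne closure above memoizes each module's go.mod summary through mg.loadCache.Do, so concurrent graph walkers load every file at most once. A simplified generic analogue of such a Do cache (the real cmd/go version lives in the internal par package):

package main

import (
	"fmt"
	"sync"
)

// cache memoizes f(key) results, computing each key at most once;
// the per-entry sync.Once also deduplicates concurrent calls for the
// same key.
type cache[K comparable, V any] struct {
	mu sync.Mutex
	m  map[K]*entry[V]
}

type entry[V any] struct {
	once sync.Once
	v    V
	err  error
}

func (c *cache[K, V]) Do(k K, f func() (V, error)) (V, error) {
	c.mu.Lock()
	if c.m == nil {
		c.m = make(map[K]*entry[V])
	}
	e := c.m[k]
	if e == nil {
		e = new(entry[V])
		c.m[k] = e
	}
	c.mu.Unlock()
	e.once.Do(func() { e.v, e.err = f() })
	return e.v, e.err
}

func main() {
	var c cache[string, int]
	v, _ := c.Do("example.com/mod@v1.0.0", func() (int, error) { return 1, nil })
	fmt.Println(v)
}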
- newRS, rsErr := updateRoots(ctx, rs.direct, rs, nil, nil, false) + newRS, rsErr := updateRoots(loaderstate, ctx, rs.direct, rs, nil, nil, false) if rsErr != nil { // Failed to update roots, perhaps because of an error in a transitive // dependency needed for the update. Return the original Requirements @@ -627,7 +617,7 @@ func expandGraph(ctx context.Context, rs *Requirements) (*Requirements, *ModuleG return rs, mg, rsErr } rs = newRS - mg, mgErr = rs.Graph(ctx) + mg, mgErr = rs.Graph(loaderstate, ctx) } return rs, mg, mgErr @@ -649,16 +639,16 @@ func expandGraph(ctx context.Context, rs *Requirements) (*Requirements, *ModuleG // On success, EditBuildList reports whether the selected version of any module // in the build list may have been changed (possibly to or from "none") as a // result. -func EditBuildList(ctx context.Context, add, mustSelect []module.Version) (changed bool, err error) { - rs, changed, err := editRequirements(ctx, LoadModFile(ctx), add, mustSelect) +func EditBuildList(loaderstate *State, ctx context.Context, add, mustSelect []module.Version) (changed bool, err error) { + rs, changed, err := editRequirements(loaderstate, ctx, LoadModFile(loaderstate, ctx), add, mustSelect) if err != nil { return false, err } - requirements = rs + loaderstate.requirements = rs return changed, nil } -func overrideRoots(ctx context.Context, rs *Requirements, replace []module.Version) *Requirements { +func overrideRoots(loaderstate *State, ctx context.Context, rs *Requirements, replace []module.Version) *Requirements { drop := make(map[string]bool) for _, m := range replace { drop[m.Path] = true @@ -671,7 +661,7 @@ func overrideRoots(ctx context.Context, rs *Requirements, replace []module.Versi } roots = append(roots, replace...) gover.ModSort(roots) - return newRequirements(rs.pruning, roots, rs.direct) + return newRequirements(loaderstate, rs.pruning, roots, rs.direct) } // A ConstraintError describes inconsistent constraints in EditBuildList @@ -775,28 +765,28 @@ func (c Conflict) String() string { // tidyRoots trims the root dependencies to the minimal requirements needed to // both retain the same versions of all packages in pkgs and satisfy the // graph-pruning invariants (if applicable). 
-func tidyRoots(ctx context.Context, rs *Requirements, pkgs []*loadPkg) (*Requirements, error) { - mainModule := MainModules.mustGetSingleMainModule() +func tidyRoots(loaderstate *State, ctx context.Context, rs *Requirements, pkgs []*loadPkg) (*Requirements, error) { + mainModule := loaderstate.MainModules.mustGetSingleMainModule(loaderstate) if rs.pruning == unpruned { - return tidyUnprunedRoots(ctx, mainModule, rs, pkgs) + return tidyUnprunedRoots(loaderstate, ctx, mainModule, rs, pkgs) } - return tidyPrunedRoots(ctx, mainModule, rs, pkgs) + return tidyPrunedRoots(loaderstate, ctx, mainModule, rs, pkgs) } -func updateRoots(ctx context.Context, direct map[string]bool, rs *Requirements, pkgs []*loadPkg, add []module.Version, rootsImported bool) (*Requirements, error) { +func updateRoots(loaderstate *State, ctx context.Context, direct map[string]bool, rs *Requirements, pkgs []*loadPkg, add []module.Version, rootsImported bool) (*Requirements, error) { switch rs.pruning { case unpruned: - return updateUnprunedRoots(ctx, direct, rs, add) + return updateUnprunedRoots(loaderstate, ctx, direct, rs, add) case pruned: - return updatePrunedRoots(ctx, direct, rs, pkgs, add, rootsImported) + return updatePrunedRoots(loaderstate, ctx, direct, rs, pkgs, add, rootsImported) case workspace: - return updateWorkspaceRoots(ctx, direct, rs, add) + return updateWorkspaceRoots(loaderstate, ctx, direct, rs, add) default: panic(fmt.Sprintf("unsupported pruning mode: %v", rs.pruning)) } } -func updateWorkspaceRoots(ctx context.Context, direct map[string]bool, rs *Requirements, add []module.Version) (*Requirements, error) { +func updateWorkspaceRoots(loaderstate *State, ctx context.Context, direct map[string]bool, rs *Requirements, add []module.Version) (*Requirements, error) { if len(add) != 0 { // add should be empty in workspace mode because workspace mode implies // -mod=readonly, which in turn implies no new requirements. The code path @@ -807,7 +797,7 @@ func updateWorkspaceRoots(ctx context.Context, direct map[string]bool, rs *Requi // return an error. panic("add is not empty") } - return newRequirements(workspace, rs.rootModules, direct), nil + return newRequirements(loaderstate, workspace, rs.rootModules, direct), nil } // tidyPrunedRoots returns a minimal set of root requirements that maintains the @@ -826,16 +816,16 @@ func updateWorkspaceRoots(ctx context.Context, direct map[string]bool, rs *Requi // To ensure that the loading process eventually converges, the caller should // add any needed roots from the tidy root set (without removing existing untidy // roots) until the set of roots has converged. 
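The convergence contract just described makes tidyPrunedRoots a fixed-point loop: adding roots can change the graph, which can demand more roots, so it iterates until a pass adds nothing. The bare control flow, with invented stand-ins for the module machinery:

package main

import "fmt"

// missingRoots stands in for "compute the graph from the current roots
// and report which extra roots it demands"; in cmd/go this is the
// Graph call plus the package walk.
func missingRoots(roots map[string]bool) []string {
	if roots["a"] && !roots["b"] {
		return []string{"b"} // a's requirements pull in b
	}
	return nil
}

func main() {
	roots := map[string]bool{"a": true}
	// Loop until a pass adds no new roots: the root set has converged.
	for {
		added := false
		for _, r := range missingRoots(roots) {
			if !roots[r] {
				roots[r] = true
				added = true
			}
		}
		if !added {
			break
		}
	}
	fmt.Println(len(roots), "roots after convergence")
}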
-func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requirements, pkgs []*loadPkg) (*Requirements, error) { +func tidyPrunedRoots(loaderstate *State, ctx context.Context, mainModule module.Version, old *Requirements, pkgs []*loadPkg) (*Requirements, error) { var ( roots []module.Version pathIsRoot = map[string]bool{mainModule.Path: true} ) - if v, ok := old.rootSelected("go"); ok { + if v, ok := old.rootSelected(loaderstate, "go"); ok { roots = append(roots, module.Version{Path: "go", Version: v}) pathIsRoot["go"] = true } - if v, ok := old.rootSelected("toolchain"); ok { + if v, ok := old.rootSelected(loaderstate, "toolchain"); ok { roots = append(roots, module.Version{Path: "toolchain", Version: v}) pathIsRoot["toolchain"] = true } @@ -857,7 +847,7 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir if !pkg.flags.has(pkgInAll) { continue } - if pkg.fromExternalModule() && !pathIsRoot[pkg.mod.Path] { + if pkg.fromExternalModule(loaderstate) && !pathIsRoot[pkg.mod.Path] { roots = append(roots, pkg.mod) pathIsRoot[pkg.mod.Path] = true } @@ -865,11 +855,11 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir queued[pkg] = true } gover.ModSort(roots) - tidy := newRequirements(pruned, roots, old.direct) + tidy := newRequirements(loaderstate, pruned, roots, old.direct) for len(queue) > 0 { roots = tidy.rootModules - mg, err := tidy.Graph(ctx) + mg, err := tidy.Graph(loaderstate, ctx) if err != nil { return nil, err } @@ -902,12 +892,12 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir if len(roots) > len(tidy.rootModules) { gover.ModSort(roots) - tidy = newRequirements(pruned, roots, tidy.direct) + tidy = newRequirements(loaderstate, pruned, roots, tidy.direct) } } roots = tidy.rootModules - _, err := tidy.Graph(ctx) + _, err := tidy.Graph(loaderstate, ctx) if err != nil { return nil, err } @@ -931,8 +921,8 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir pkg := pkg q.Add(func() { skipModFile := true - _, _, _, _, err := importFromModules(ctx, pkg.path, tidy, nil, skipModFile) - if aie := (*AmbiguousImportError)(nil); errors.As(err, &aie) { + _, _, _, _, err := importFromModules(loaderstate, ctx, pkg.path, tidy, nil, skipModFile) + if _, ok := errors.AsType[*AmbiguousImportError](err); ok { disambiguateRoot.Store(pkg.mod, true) } }) @@ -948,8 +938,8 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir if len(roots) > len(tidy.rootModules) { module.Sort(roots) - tidy = newRequirements(pruned, roots, tidy.direct) - _, err = tidy.Graph(ctx) + tidy = newRequirements(loaderstate, pruned, roots, tidy.direct) + _, err = tidy.Graph(loaderstate, ctx) if err != nil { return nil, err } @@ -1009,7 +999,7 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir // // (See https://golang.org/design/36460-lazy-module-loading#invariants for more // detail.) 
-func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirements, pkgs []*loadPkg, add []module.Version, rootsImported bool) (*Requirements, error) { +func updatePrunedRoots(loaderstate *State, ctx context.Context, direct map[string]bool, rs *Requirements, pkgs []*loadPkg, add []module.Version, rootsImported bool) (*Requirements, error) { roots := rs.rootModules rootsUpgraded := false @@ -1019,7 +1009,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem // either pkgInAll or pkgIsRoot is included as a root.” needSort := false for _, pkg := range pkgs { - if !pkg.fromExternalModule() { + if !pkg.fromExternalModule(loaderstate) { // pkg was not loaded from a module dependency, so we don't need // to do anything special to maintain that dependency. continue @@ -1068,7 +1058,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem continue } - if _, ok := rs.rootSelected(pkg.mod.Path); ok { + if _, ok := rs.rootSelected(loaderstate, pkg.mod.Path); ok { // It is possible that the main module's go.mod file is incomplete or // otherwise erroneous — for example, perhaps the author forgot to 'git // add' their updated go.mod file after adding a new package import, or @@ -1104,7 +1094,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem } for _, m := range add { - if v, ok := rs.rootSelected(m.Path); !ok || gover.ModCompare(m.Path, v, m.Version) < 0 { + if v, ok := rs.rootSelected(loaderstate, m.Path); !ok || gover.ModCompare(m.Path, v, m.Version) < 0 { roots = append(roots, m) rootsUpgraded = true needSort = true @@ -1121,7 +1111,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem // We've added or upgraded one or more roots, so load the full module // graph so that we can update those roots to be consistent with other // requirements. - if mustHaveCompleteRequirements() { + if mustHaveCompleteRequirements(loaderstate) { // Our changes to the roots may have moved dependencies into or out of // the graph-pruning horizon, which could in turn change the selected // versions of other modules. (For pruned modules adding or removing an @@ -1129,9 +1119,9 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem return rs, errGoModDirty } - rs = newRequirements(pruned, roots, direct) + rs = newRequirements(loaderstate, pruned, roots, direct) var err error - mg, err = rs.Graph(ctx) + mg, err = rs.Graph(loaderstate, ctx) if err != nil { return rs, err } @@ -1145,20 +1135,20 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem // We've already loaded the full module graph, which includes the // requirements of all of the root modules — even the transitive // requirements, if they are unpruned! - mg, _ = rs.Graph(ctx) + mg, _ = rs.Graph(loaderstate, ctx) } else if cfg.BuildMod == "vendor" { // We can't spot-check the requirements of other modules because we // don't in general have their go.mod files available in the vendor // directory. (Fortunately this case is impossible, because mg.graph is // always non-nil in vendor mode!) panic("internal error: rs.graph is unexpectedly nil with -mod=vendor") - } else if !spotCheckRoots(ctx, rs, spotCheckRoot) { + } else if !spotCheckRoots(loaderstate, ctx, rs, spotCheckRoot) { // We spot-checked the explicit requirements of the roots that are // relevant to the packages we've loaded. 
Unfortunately, they're // inconsistent in some way; we need to load the full module graph // so that we can fix the roots properly. var err error - mg, err = rs.Graph(ctx) + mg, err = rs.Graph(loaderstate, ctx) if err != nil { return rs, err } @@ -1168,7 +1158,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem roots = make([]module.Version, 0, len(rs.rootModules)) rootsUpgraded = false inRootPaths := make(map[string]bool, len(rs.rootModules)+1) - for _, mm := range MainModules.Versions() { + for _, mm := range loaderstate.MainModules.Versions() { inRootPaths[mm.Path] = true } for _, m := range rs.rootModules { @@ -1194,7 +1184,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem var v string if mg == nil { - v, _ = rs.rootSelected(m.Path) + v, _ = rs.rootSelected(loaderstate, m.Path) } else { v = mg.Selected(m.Path) } @@ -1228,12 +1218,12 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem // preserve its cached ModuleGraph (if any). return rs, nil } - return newRequirements(pruned, roots, direct), nil + return newRequirements(loaderstate, pruned, roots, direct), nil } // spotCheckRoots reports whether the versions of the roots in rs satisfy the // explicit requirements of the modules in mods. -func spotCheckRoots(ctx context.Context, rs *Requirements, mods map[module.Version]bool) bool { +func spotCheckRoots(loaderstate *State, ctx context.Context, rs *Requirements, mods map[module.Version]bool) bool { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -1245,14 +1235,14 @@ func spotCheckRoots(ctx context.Context, rs *Requirements, mods map[module.Versi return } - summary, err := goModSummary(m) + summary, err := goModSummary(loaderstate, m) if err != nil { cancel() return } for _, r := range summary.require { - if v, ok := rs.rootSelected(r.Path); ok && gover.ModCompare(r.Path, v, r.Version) < 0 { + if v, ok := rs.rootSelected(loaderstate, r.Path); ok && gover.ModCompare(r.Path, v, r.Version) < 0 { cancel() return } @@ -1274,7 +1264,7 @@ func spotCheckRoots(ctx context.Context, rs *Requirements, mods map[module.Versi // the selected version of every module that provided or lexically could have // provided a package in pkgs, and includes the selected version of every such // module in direct as a root. -func tidyUnprunedRoots(ctx context.Context, mainModule module.Version, old *Requirements, pkgs []*loadPkg) (*Requirements, error) { +func tidyUnprunedRoots(loaderstate *State, ctx context.Context, mainModule module.Version, old *Requirements, pkgs []*loadPkg) (*Requirements, error) { var ( // keep is a set of modules that provide packages or are needed to // disambiguate imports. @@ -1302,16 +1292,16 @@ func tidyUnprunedRoots(ctx context.Context, mainModule module.Version, old *Requ // without its sum. See #47738. 
altMods = map[string]string{} ) - if v, ok := old.rootSelected("go"); ok { + if v, ok := old.rootSelected(loaderstate, "go"); ok { keep = append(keep, module.Version{Path: "go", Version: v}) keptPath["go"] = true } - if v, ok := old.rootSelected("toolchain"); ok { + if v, ok := old.rootSelected(loaderstate, "toolchain"); ok { keep = append(keep, module.Version{Path: "toolchain", Version: v}) keptPath["toolchain"] = true } for _, pkg := range pkgs { - if !pkg.fromExternalModule() { + if !pkg.fromExternalModule(loaderstate) { continue } if m := pkg.mod; !keptPath[m.Path] { @@ -1329,7 +1319,7 @@ func tidyUnprunedRoots(ctx context.Context, mainModule module.Version, old *Requ // Construct a build list with a minimal set of roots. // This may remove or downgrade modules in altMods. - reqs := &mvsReqs{roots: keep} + reqs := &mvsReqs{loaderstate: loaderstate, roots: keep} min, err := mvs.Req(mainModule, rootPaths, reqs) if err != nil { return nil, err @@ -1360,7 +1350,7 @@ func tidyUnprunedRoots(ctx context.Context, mainModule module.Version, old *Requ } } - return newRequirements(unpruned, min, old.direct), nil + return newRequirements(loaderstate, unpruned, min, old.direct), nil } // updateUnprunedRoots returns a set of root requirements that includes the selected @@ -1377,8 +1367,8 @@ func tidyUnprunedRoots(ctx context.Context, mainModule module.Version, old *Requ // by a dependency in add. // 4. Every version in add is selected at its given version unless upgraded by // (the dependencies of) an existing root or another module in add. -func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requirements, add []module.Version) (*Requirements, error) { - mg, err := rs.Graph(ctx) +func updateUnprunedRoots(loaderstate *State, ctx context.Context, direct map[string]bool, rs *Requirements, add []module.Version) (*Requirements, error) { + mg, err := rs.Graph(loaderstate, ctx) if err != nil { // We can't ignore errors in the module graph even if the user passed the -e // flag to try to push past them. If we can't load the complete module @@ -1386,7 +1376,7 @@ func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requir return rs, err } - if mustHaveCompleteRequirements() { + if mustHaveCompleteRequirements(loaderstate) { // Instead of actually updating the requirements, just check that no updates // are needed. if rs == nil { @@ -1406,7 +1396,7 @@ func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requir } } for mPath := range direct { - if _, ok := rs.rootSelected(mPath); !ok { + if _, ok := rs.rootSelected(loaderstate, mPath); !ok { // Module m is supposed to be listed explicitly, but isn't. // // Note that this condition is also detected (and logged with more @@ -1445,7 +1435,7 @@ func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requir // This is only for convenience and clarity for end users: in an unpruned module, // the choice of explicit vs. implicit dependency has no impact on MVS // selection (for itself or any other module). - keep := append(mg.BuildList()[MainModules.Len():], add...) + keep := append(mg.BuildList()[loaderstate.MainModules.Len():], add...) 
for _, m := range keep { if direct[m.Path] && !inRootPaths[m.Path] { rootPaths = append(rootPaths, m.Path) @@ -1454,14 +1444,14 @@ func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requir } var roots []module.Version - for _, mainModule := range MainModules.Versions() { - min, err := mvs.Req(mainModule, rootPaths, &mvsReqs{roots: keep}) + for _, mainModule := range loaderstate.MainModules.Versions() { + min, err := mvs.Req(mainModule, rootPaths, &mvsReqs{loaderstate: loaderstate, roots: keep}) if err != nil { return rs, err } roots = append(roots, min...) } - if MainModules.Len() > 1 { + if loaderstate.MainModules.Len() > 1 { gover.ModSort(roots) } if rs.pruning == unpruned && slices.Equal(roots, rs.rootModules) && maps.Equal(direct, rs.direct) { @@ -1470,12 +1460,12 @@ func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requir return rs, nil } - return newRequirements(unpruned, roots, direct), nil + return newRequirements(loaderstate, unpruned, roots, direct), nil } // convertPruning returns a version of rs with the given pruning behavior. // If rs already has the given pruning, convertPruning returns rs unmodified. -func convertPruning(ctx context.Context, rs *Requirements, pruning modPruning) (*Requirements, error) { +func convertPruning(loaderstate *State, ctx context.Context, rs *Requirements, pruning modPruning) (*Requirements, error) { if rs.pruning == pruning { return rs, nil } else if rs.pruning == workspace || pruning == workspace { @@ -1487,7 +1477,7 @@ func convertPruning(ctx context.Context, rs *Requirements, pruning modPruning) ( // pruned module graph are a superset of the roots of an unpruned one, so // we don't need to add any new roots — we just need to drop the ones that // are redundant, which is exactly what updateUnprunedRoots does. - return updateUnprunedRoots(ctx, rs.direct, rs, nil) + return updateUnprunedRoots(loaderstate, ctx, rs.direct, rs, nil) } // We are converting an unpruned module to a pruned one. @@ -1497,9 +1487,9 @@ func convertPruning(ctx context.Context, rs *Requirements, pruning modPruning) ( // root set! “Include the transitive dependencies of every module in the build // list” is exactly what happens in a pruned module if we promote every module // in the build list to a root. - mg, err := rs.Graph(ctx) + mg, err := rs.Graph(loaderstate, ctx) if err != nil { return rs, err } - return newRequirements(pruned, mg.BuildList()[MainModules.Len():], rs.direct), nil + return newRequirements(loaderstate, pruned, mg.BuildList()[loaderstate.MainModules.Len():], rs.direct), nil } diff --git a/src/cmd/go/internal/modload/edit.go b/src/cmd/go/internal/modload/edit.go index b406193dc5a..1996b7c26b0 100644 --- a/src/cmd/go/internal/modload/edit.go +++ b/src/cmd/go/internal/modload/edit.go @@ -42,7 +42,7 @@ import ( // If pruning is enabled, the roots of the edited requirements include an // explicit entry for each module path in tryUpgrade, mustSelect, and the roots // of rs, unless the selected version for the module path is "none". 
-func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSelect []module.Version) (edited *Requirements, changed bool, err error) { +func editRequirements(loaderstate *State, ctx context.Context, rs *Requirements, tryUpgrade, mustSelect []module.Version) (edited *Requirements, changed bool, err error) { if rs.pruning == workspace { panic("editRequirements cannot edit workspace requirements") } @@ -82,7 +82,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel } if rootPruning != rs.pruning { - rs, err = convertPruning(ctx, rs, rootPruning) + rs, err = convertPruning(loaderstate, ctx, rs, rootPruning) if err != nil { return orig, false, err } @@ -100,13 +100,13 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel // dependencies, so we need to treat everything in the build list as // potentially relevant — that is, as what would be a “root” in a module // with graph pruning enabled. - mg, err := rs.Graph(ctx) + mg, err := rs.Graph(loaderstate, ctx) if err != nil { // If we couldn't load the graph, we don't know what its requirements were // to begin with, so we can't edit those requirements in a coherent way. return orig, false, err } - bl := mg.BuildList()[MainModules.Len():] + bl := mg.BuildList()[loaderstate.MainModules.Len():] selectedRoot = make(map[string]string, len(bl)) for _, m := range bl { selectedRoot[m.Path] = m.Version @@ -224,10 +224,12 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel // of every root. The upgraded roots are in addition to the original // roots, so we will have enough information to trace a path to each // conflict we discover from one or more of the original roots. - mg, upgradedRoots, err := extendGraph(ctx, rootPruning, roots, selectedRoot) + mg, upgradedRoots, err := extendGraph(loaderstate, ctx, rootPruning, roots, selectedRoot) if err != nil { - var tooNew *gover.TooNewError - if mg == nil || errors.As(err, &tooNew) { + if mg == nil { + return orig, false, err + } + if _, ok := errors.AsType[*gover.TooNewError](err); ok { return orig, false, err } // We're about to walk the entire extended module graph, so we will find @@ -389,7 +391,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel // the edit. We want to make sure we consider keeping it as-is, // even if it wouldn't normally be included. (For example, it might // be a pseudo-version or pre-release.) - origMG, _ := orig.Graph(ctx) + origMG, _ := orig.Graph(loaderstate, ctx) origV := origMG.Selected(m.Path) if conflict.Err != nil && origV == m.Version { @@ -413,7 +415,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel rejectedRoot[m] = true prev := m for { - prev, err = previousVersion(ctx, prev) + prev, err = previousVersion(loaderstate, ctx, prev) if gover.ModCompare(m.Path, m.Version, origV) > 0 && (gover.ModCompare(m.Path, prev.Version, origV) < 0 || err != nil) { // previousVersion skipped over origV. Insert it into the order. prev.Version = origV @@ -513,13 +515,13 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel // The modules in mustSelect are always promoted to be explicit. 
for _, m := range mustSelect { - if m.Version != "none" && !MainModules.Contains(m.Path) { + if m.Version != "none" && !loaderstate.MainModules.Contains(m.Path) { rootPaths = append(rootPaths, m.Path) } } for _, m := range roots { - if v, ok := rs.rootSelected(m.Path); ok && (v == m.Version || rs.direct[m.Path]) { + if v, ok := rs.rootSelected(loaderstate, m.Path); ok && (v == m.Version || rs.direct[m.Path]) { // m.Path was formerly a root, and either its version hasn't changed or // we believe that it provides a package directly imported by a package // or test in the main module. For now we'll assume that it is still @@ -530,7 +532,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel } } - roots, err = mvs.Req(MainModules.mustGetSingleMainModule(), rootPaths, &mvsReqs{roots: roots}) + roots, err = mvs.Req(loaderstate.MainModules.mustGetSingleMainModule(loaderstate), rootPaths, &mvsReqs{loaderstate: loaderstate, roots: roots}) if err != nil { return nil, false, err } @@ -561,7 +563,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel direct[m.Path] = true } } - edited = newRequirements(rootPruning, roots, direct) + edited = newRequirements(loaderstate, rootPruning, roots, direct) // If we ended up adding a dependency that upgrades our go version far enough // to activate pruning, we must convert the edited Requirements in order to @@ -576,7 +578,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel // those two modules will never be downgraded due to a conflict with any other // constraint. if rootPruning == unpruned { - if v, ok := edited.rootSelected("go"); ok && pruningForGoVersion(v) == pruned { + if v, ok := edited.rootSelected(loaderstate, "go"); ok && pruningForGoVersion(v) == pruned { // Since we computed the edit with the unpruned graph, and the pruned // graph is a strict subset of the unpruned graph, this conversion // preserves the exact (edited) build list that we already computed. @@ -585,7 +587,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel // the graph. 'go get' will check for that sort of transition and log a // message reminding the user how to clean up this mess we're about to // make. 😅 - edited, err = convertPruning(ctx, edited, pruned) + edited, err = convertPruning(loaderstate, ctx, edited, pruned) if err != nil { return orig, false, err } @@ -605,9 +607,9 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel // The extended graph is useful for diagnosing version conflicts: for each // selected module version, it can provide a complete path of requirements from // some root to that version. -func extendGraph(ctx context.Context, rootPruning modPruning, roots []module.Version, selectedRoot map[string]string) (mg *ModuleGraph, upgradedRoot map[module.Version]bool, err error) { +func extendGraph(loaderstate *State, ctx context.Context, rootPruning modPruning, roots []module.Version, selectedRoot map[string]string) (mg *ModuleGraph, upgradedRoot map[module.Version]bool, err error) { for { - mg, err = readModGraph(ctx, rootPruning, roots, upgradedRoot) + mg, err = readModGraph(loaderstate, ctx, rootPruning, roots, upgradedRoot) // We keep on going even if err is non-nil until we reach a steady state. // (Note that readModGraph returns a non-nil *ModuleGraph even in case of // errors.) 
The caller may be able to fix the errors by adjusting versions, diff --git a/src/cmd/go/internal/modload/import.go b/src/cmd/go/internal/modload/import.go index 171d9d692fb..0ec4102cc61 100644 --- a/src/cmd/go/internal/modload/import.go +++ b/src/cmd/go/internal/modload/import.go @@ -29,10 +29,15 @@ import ( ) type ImportMissingError struct { - Path string - Module module.Version - QueryErr error + Path string + Module module.Version + QueryErr error + modContainingCWD module.Version + allowMissingModuleImports bool + // modRoot is dependent on the value of ImportingMainModule and should be + // kept in sync. + modRoot string ImportingMainModule module.Version // isStd indicates whether we would expect to find the package in the standard @@ -63,10 +68,10 @@ func (e *ImportMissingError) Error() string { } return msg } - if e.QueryErr != nil && e.QueryErr != ErrNoModRoot { + if e.QueryErr != nil && !errors.Is(e.QueryErr, ErrNoModRoot) { return fmt.Sprintf("cannot find module providing package %s: %v", e.Path, e.QueryErr) } - if cfg.BuildMod == "mod" || (cfg.BuildMod == "readonly" && allowMissingModuleImports) { + if cfg.BuildMod == "mod" || (cfg.BuildMod == "readonly" && e.allowMissingModuleImports) { return "cannot find module providing package " + e.Path } @@ -82,8 +87,8 @@ func (e *ImportMissingError) Error() string { if e.QueryErr != nil { return fmt.Sprintf("%s: %v", message, e.QueryErr) } - if e.ImportingMainModule.Path != "" && e.ImportingMainModule != MainModules.ModContainingCWD() { - return fmt.Sprintf("%s; to add it:\n\tcd %s\n\tgo get %s", message, MainModules.ModRoot(e.ImportingMainModule), e.Path) + if e.ImportingMainModule.Path != "" && e.ImportingMainModule != e.modContainingCWD { + return fmt.Sprintf("%s; to add it:\n\tcd %s\n\tgo get %s", message, e.modRoot, e.Path) } return fmt.Sprintf("%s; to add it:\n\tgo get %s", message, e.Path) } @@ -262,8 +267,8 @@ func (e *invalidImportError) Unwrap() error { // (https://go.dev/issue/56222) for modules with 'go' versions between 1.17 and // 1.20, preventing unnecessary go.sum churn and network access in those // modules. -func importFromModules(ctx context.Context, path string, rs *Requirements, mg *ModuleGraph, skipModFile bool) (m module.Version, modroot, dir string, altMods []module.Version, err error) { - invalidf := func(format string, args ...interface{}) (module.Version, string, string, []module.Version, error) { +func importFromModules(loaderstate *State, ctx context.Context, path string, rs *Requirements, mg *ModuleGraph, skipModFile bool) (m module.Version, modroot, dir string, altMods []module.Version, err error) { + invalidf := func(format string, args ...any) (module.Version, string, string, []module.Version, error) { return module.Version{}, "", "", nil, &invalidImportError{ importPath: path, err: fmt.Errorf(format, args...), @@ -299,12 +304,12 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M // Is the package in the standard library? 
pathIsStd := search.IsStandardImportPath(path) if pathIsStd && modindex.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) { - for _, mainModule := range MainModules.Versions() { - if MainModules.InGorootSrc(mainModule) { - if dir, ok, err := dirInModule(path, MainModules.PathPrefix(mainModule), MainModules.ModRoot(mainModule), true); err != nil { - return module.Version{}, MainModules.ModRoot(mainModule), dir, nil, err + for _, mainModule := range loaderstate.MainModules.Versions() { + if loaderstate.MainModules.InGorootSrc(mainModule) { + if dir, ok, err := dirInModule(path, loaderstate.MainModules.PathPrefix(mainModule), loaderstate.MainModules.ModRoot(mainModule), true); err != nil { + return module.Version{}, loaderstate.MainModules.ModRoot(mainModule), dir, nil, err } else if ok { - return mainModule, MainModules.ModRoot(mainModule), dir, nil, nil + return mainModule, loaderstate.MainModules.ModRoot(mainModule), dir, nil, nil } } } @@ -321,10 +326,10 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M // Everything must be in the main modules or the main module's or workspace's vendor directory. if cfg.BuildMod == "vendor" { var mainErr error - for _, mainModule := range MainModules.Versions() { - modRoot := MainModules.ModRoot(mainModule) + for _, mainModule := range loaderstate.MainModules.Versions() { + modRoot := loaderstate.MainModules.ModRoot(mainModule) if modRoot != "" { - dir, mainOK, err := dirInModule(path, MainModules.PathPrefix(mainModule), modRoot, true) + dir, mainOK, err := dirInModule(path, loaderstate.MainModules.PathPrefix(mainModule), modRoot, true) if mainErr == nil { mainErr = err } @@ -336,8 +341,8 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M } } - if HasModRoot() { - vendorDir := VendorDir() + if loaderstate.HasModRoot() { + vendorDir := VendorDir(loaderstate) dir, inVendorDir, _ := dirInModule(path, "", vendorDir, false) if inVendorDir { readVendorList(vendorDir) @@ -345,13 +350,13 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M // vendor/modules.txt does not exist or the user manually added directories to the vendor directory. // Go 1.23 and later require vendored packages to be present in modules.txt to be imported. _, ok := vendorPkgModule[path] - if ok || (gover.Compare(MainModules.GoVersion(), gover.ExplicitModulesTxtImportVersion) < 0) { + if ok || (gover.Compare(loaderstate.MainModules.GoVersion(loaderstate), gover.ExplicitModulesTxtImportVersion) < 0) { mods = append(mods, vendorPkgModule[path]) dirs = append(dirs, dir) roots = append(roots, vendorDir) } else { subCommand := "mod" - if inWorkspaceMode() { + if loaderstate.inWorkspaceMode() { subCommand = "work" } fmt.Fprintf(os.Stderr, "go: ignoring package %s which exists in the vendor directory but is missing from vendor/modules.txt. 
To sync the vendor directory run go %s vendor.\n", path, subCommand) @@ -368,7 +373,11 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M } if len(mods) == 0 { - return module.Version{}, "", "", nil, &ImportMissingError{Path: path} + return module.Version{}, "", "", nil, &ImportMissingError{ + Path: path, + modContainingCWD: loaderstate.MainModules.ModContainingCWD(), + allowMissingModuleImports: loaderstate.allowMissingModuleImports, + } } return mods[0], roots[0], dirs[0], nil, nil @@ -399,7 +408,7 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M ok bool ) if mg == nil { - v, ok = rs.rootSelected(prefix) + v, ok = rs.rootSelected(loaderstate, prefix) } else { v, ok = mg.Selected(prefix), true } @@ -408,9 +417,9 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M } m := module.Version{Path: prefix, Version: v} - root, isLocal, err := fetch(ctx, m) + root, isLocal, err := fetch(loaderstate, ctx, m) if err != nil { - if sumErr := (*sumMissingError)(nil); errors.As(err, &sumErr) { + if _, ok := errors.AsType[*sumMissingError](err); ok { // We are missing a sum needed to fetch a module in the build list. // We can't verify that the package is unique, and we may not find // the package at all. Keep checking other modules to decide which @@ -471,8 +480,8 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M // If the module graph is pruned and this is a test-only dependency // of a package in "all", we didn't necessarily load that file // when we read the module graph, so do it now to be sure. - if !skipModFile && cfg.BuildMod != "vendor" && mods[0].Path != "" && !MainModules.Contains(mods[0].Path) { - if _, err := goModSummary(mods[0]); err != nil { + if !skipModFile && cfg.BuildMod != "vendor" && mods[0].Path != "" && !loaderstate.MainModules.Contains(mods[0].Path) { + if _, err := goModSummary(loaderstate, mods[0]); err != nil { return module.Version{}, "", "", nil, err } } @@ -483,15 +492,21 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M // We checked the full module graph and still didn't find the // requested package. var queryErr error - if !HasModRoot() { - queryErr = ErrNoModRoot + if !loaderstate.HasModRoot() { + queryErr = NewNoMainModulesError(loaderstate) + } + return module.Version{}, "", "", nil, &ImportMissingError{ + Path: path, + QueryErr: queryErr, + isStd: pathIsStd, + modContainingCWD: loaderstate.MainModules.ModContainingCWD(), + allowMissingModuleImports: loaderstate.allowMissingModuleImports, } - return module.Version{}, "", "", nil, &ImportMissingError{Path: path, QueryErr: queryErr, isStd: pathIsStd} } // So far we've checked the root dependencies. // Load the full module graph and try again. - mg, err = rs.Graph(ctx) + mg, err = rs.Graph(loaderstate, ctx) if err != nil { // We might be missing one or more transitive (implicit) dependencies from // the module graph, so we can't return an ImportMissingError here — one @@ -507,12 +522,12 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M // // Unlike QueryPattern, queryImport prefers to add a replaced version of a // module *before* checking the proxies for a version to add. 
-func queryImport(ctx context.Context, path string, rs *Requirements) (module.Version, error) { +func queryImport(loaderstate *State, ctx context.Context, path string, rs *Requirements) (module.Version, error) { // To avoid spurious remote fetches, try the latest replacement for each // module (golang.org/issue/26241). var mods []module.Version - if MainModules != nil { // TODO(#48912): Ensure MainModules exists at this point, and remove the check. - for mp, mv := range MainModules.HighestReplaced() { + if loaderstate.MainModules != nil { // TODO(#48912): Ensure MainModules exists at this point, and remove the check. + for mp, mv := range loaderstate.MainModules.HighestReplaced() { if !maybeInModule(path, mp) { continue } @@ -528,7 +543,7 @@ func queryImport(ctx context.Context, path string, rs *Requirements) (module.Ver mv = module.ZeroPseudoVersion("v0") } } - mg, err := rs.Graph(ctx) + mg, err := rs.Graph(loaderstate, ctx) if err != nil { return module.Version{}, err } @@ -547,9 +562,9 @@ func queryImport(ctx context.Context, path string, rs *Requirements) (module.Ver return len(mods[i].Path) > len(mods[j].Path) }) for _, m := range mods { - root, isLocal, err := fetch(ctx, m) + root, isLocal, err := fetch(loaderstate, ctx, m) if err != nil { - if sumErr := (*sumMissingError)(nil); errors.As(err, &sumErr) { + if _, ok := errors.AsType[*sumMissingError](err); ok { return module.Version{}, &ImportMissingSumError{importPath: path} } return module.Version{}, err @@ -558,7 +573,12 @@ func queryImport(ctx context.Context, path string, rs *Requirements) (module.Ver return m, err } else if ok { if cfg.BuildMod == "readonly" { - return module.Version{}, &ImportMissingError{Path: path, replaced: m} + return module.Version{}, &ImportMissingError{ + Path: path, + replaced: m, + modContainingCWD: loaderstate.MainModules.ModContainingCWD(), + allowMissingModuleImports: loaderstate.allowMissingModuleImports, + } } return m, nil } @@ -567,7 +587,7 @@ func queryImport(ctx context.Context, path string, rs *Requirements) (module.Ver // The package path is not valid to fetch remotely, // so it can only exist in a replaced module, // and we know from the above loop that it is not. - replacement := Replacement(mods[0]) + replacement := Replacement(loaderstate, mods[0]) return module.Version{}, &PackageNotInModuleError{ Mod: mods[0], Query: "latest", @@ -584,10 +604,15 @@ func queryImport(ctx context.Context, path string, rs *Requirements) (module.Ver // QueryPattern cannot possibly find a module containing this package. // // Instead of trying QueryPattern, report an ImportMissingError immediately. - return module.Version{}, &ImportMissingError{Path: path, isStd: true} + return module.Version{}, &ImportMissingError{ + Path: path, + isStd: true, + modContainingCWD: loaderstate.MainModules.ModContainingCWD(), + allowMissingModuleImports: loaderstate.allowMissingModuleImports, + } } - if (cfg.BuildMod == "readonly" || cfg.BuildMod == "vendor") && !allowMissingModuleImports { + if (cfg.BuildMod == "readonly" || cfg.BuildMod == "vendor") && !loaderstate.allowMissingModuleImports { // In readonly mode, we can't write go.mod, so we shouldn't try to look up // the module. If readonly mode was enabled explicitly, include that in // the error message. 
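Both importFromModules and queryImport above also swap the classic errors.As target idiom — declare a typed nil, pass its address — for the generic errors.AsType. A runnable comparison of the two idioms, assuming a toolchain recent enough to ship the errors.AsType used by the patch:

package main

import (
	"errors"
	"fmt"
)

type sumMissingError struct{}

func (*sumMissingError) Error() string { return "missing go.sum entry" }

func main() {
	err := fmt.Errorf("fetch: %w", &sumMissingError{})

	// Pre-generics idiom: declare a typed target and pass its address.
	var target *sumMissingError
	if errors.As(err, &target) {
		fmt.Println("errors.As matched:", target)
	}

	// Generic idiom used by the patch; requires a Go toolchain where
	// errors.AsType is available in the errors package.
	if _, ok := errors.AsType[*sumMissingError](err); ok {
		fmt.Println("errors.AsType matched")
	}
}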
@@ -599,7 +624,12 @@ func queryImport(ctx context.Context, path string, rs *Requirements) (module.Ver } else if cfg.BuildModReason != "" { queryErr = fmt.Errorf("import lookup disabled by -mod=%s\n\t(%s)", cfg.BuildMod, cfg.BuildModReason) } - return module.Version{}, &ImportMissingError{Path: path, QueryErr: queryErr} + return module.Version{}, &ImportMissingError{ + Path: path, + QueryErr: queryErr, + modContainingCWD: loaderstate.MainModules.ModContainingCWD(), + allowMissingModuleImports: loaderstate.allowMissingModuleImports, + } } // Look up module containing the package, for addition to the build list. @@ -607,17 +637,22 @@ func queryImport(ctx context.Context, path string, rs *Requirements) (module.Ver // and return m, dir, ImportMissingError. fmt.Fprintf(os.Stderr, "go: finding module for package %s\n", path) - mg, err := rs.Graph(ctx) + mg, err := rs.Graph(loaderstate, ctx) if err != nil { return module.Version{}, err } - candidates, err := QueryPackages(ctx, path, "latest", mg.Selected, CheckAllowed) + candidates, err := QueryPackages(loaderstate, ctx, path, "latest", mg.Selected, loaderstate.CheckAllowed) if err != nil { if errors.Is(err, fs.ErrNotExist) { // Return "cannot find module providing package […]" instead of whatever // low-level error QueryPattern produced. - return module.Version{}, &ImportMissingError{Path: path, QueryErr: err} + return module.Version{}, &ImportMissingError{ + Path: path, + QueryErr: err, + modContainingCWD: loaderstate.MainModules.ModContainingCWD(), + allowMissingModuleImports: loaderstate.allowMissingModuleImports, + } } else { return module.Version{}, err } @@ -642,9 +677,11 @@ func queryImport(ctx context.Context, path string, rs *Requirements) (module.Ver return c.Mod, nil } return module.Version{}, &ImportMissingError{ - Path: path, - Module: candidates[0].Mod, - newMissingVersion: candidate0MissingVersion, + Path: path, + Module: candidates[0].Mod, + newMissingVersion: candidate0MissingVersion, + modContainingCWD: loaderstate.MainModules.ModContainingCWD(), + allowMissingModuleImports: loaderstate.allowMissingModuleImports, } } @@ -747,15 +784,15 @@ func dirInModule(path, mpath, mdir string, isLocal bool) (dir string, haveGoFile // // The isLocal return value reports whether the replacement, // if any, is local to the filesystem. 
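fetch, whose diff begins below, resolves a filesystem replacement before falling back to the module cache; when the replace target has no version and a relative path, it is joined against replaceRelativeTo rather than the process working directory. A loose, hypothetical sketch of that path resolution (resolveReplaceDir is illustrative, not the patch's helper):

package main

import (
	"fmt"
	"path/filepath"
)

// resolveReplaceDir loosely mirrors how a relative replacement path in a
// go.mod is interpreted: relative to the directory containing the file
// that declared the replacement, not to the current working directory.
func resolveReplaceDir(replacePath, declaringDir string) string {
	if filepath.IsAbs(replacePath) {
		return replacePath
	}
	return filepath.Join(declaringDir, replacePath)
}

func main() {
	// replace example.com/m => ../m declared in /home/user/proj/go.mod
	fmt.Println(resolveReplaceDir("../m", "/home/user/proj")) // /home/user/m
}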
-func fetch(ctx context.Context, mod module.Version) (dir string, isLocal bool, err error) { - if modRoot := MainModules.ModRoot(mod); modRoot != "" { +func fetch(loaderstate *State, ctx context.Context, mod module.Version) (dir string, isLocal bool, err error) { + if modRoot := loaderstate.MainModules.ModRoot(mod); modRoot != "" { return modRoot, true, nil } - if r := Replacement(mod); r.Path != "" { + if r := Replacement(loaderstate, mod); r.Path != "" { if r.Version == "" { dir = r.Path if !filepath.IsAbs(dir) { - dir = filepath.Join(replaceRelativeTo(), dir) + dir = filepath.Join(replaceRelativeTo(loaderstate), dir) } // Ensure that the replacement directory actually exists: // dirInModule does not report errors for missing modules, @@ -780,7 +817,7 @@ func fetch(ctx context.Context, mod module.Version) (dir string, isLocal bool, e mod = r } - if mustHaveSums() && !modfetch.HaveSum(mod) { + if mustHaveSums(loaderstate) && !modfetch.HaveSum(mod) { return "", false, module.VersionError(mod, &sumMissingError{}) } @@ -790,8 +827,8 @@ func fetch(ctx context.Context, mod module.Version) (dir string, isLocal bool, e // mustHaveSums reports whether we require that all checksums // needed to load or build packages are already present in the go.sum file. -func mustHaveSums() bool { - return HasModRoot() && cfg.BuildMod == "readonly" && !inWorkspaceMode() +func mustHaveSums(loaderstate *State) bool { + return loaderstate.HasModRoot() && cfg.BuildMod == "readonly" && !loaderstate.inWorkspaceMode() } type sumMissingError struct { diff --git a/src/cmd/go/internal/modload/import_test.go b/src/cmd/go/internal/modload/import_test.go index eb4f5d64d3a..820fb87b592 100644 --- a/src/cmd/go/internal/modload/import_test.go +++ b/src/cmd/go/internal/modload/import_test.go @@ -56,25 +56,20 @@ var importTests = []struct { } func TestQueryImport(t *testing.T) { + loaderstate := NewState() + loaderstate.RootMode = NoRoot + loaderstate.AllowMissingModuleImports() + testenv.MustHaveExternalNetwork(t) testenv.MustHaveExecPath(t, "git") - oldAllowMissingModuleImports := allowMissingModuleImports - oldRootMode := RootMode - defer func() { - allowMissingModuleImports = oldAllowMissingModuleImports - RootMode = oldRootMode - }() - allowMissingModuleImports = true - RootMode = NoRoot - ctx := context.Background() - rs := LoadModFile(ctx) + rs := LoadModFile(loaderstate, ctx) for _, tt := range importTests { t.Run(strings.ReplaceAll(tt.path, "/", "_"), func(t *testing.T) { // Note that there is no build list, so Import should always fail. - m, err := queryImport(ctx, tt.path, rs) + m, err := queryImport(loaderstate, ctx, tt.path, rs) if tt.err == "" { if err != nil { diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go index 498ff7433ea..bbdd0e95b5c 100644 --- a/src/cmd/go/internal/modload/init.go +++ b/src/cmd/go/internal/modload/init.go @@ -38,15 +38,6 @@ import ( // // TODO(#40775): See if these can be plumbed as explicit parameters. var ( - // RootMode determines whether a module root is needed. - RootMode Root - - // ForceUseModules may be set to force modules to be enabled when - // GO111MODULE=auto or to report an error when GO111MODULE=off. - ForceUseModules bool - - allowMissingModuleImports bool - // ExplicitWriteGoMod prevents LoadPackages, ListModules, and other functions // from updating go.mod and go.sum or reporting errors when updates are // needed. A package should set this if it would cause go.mod to be written @@ -60,64 +51,50 @@ var ( // Variables set in Init. 
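The init.go hunks that follow delete the package-level variables (RootMode, ForceUseModules, allowMissingModuleImports, modRoots, workFilePath) and re-home them as fields on a State that callers construct and thread explicitly, as the new TestQueryImport setup above already demonstrates. A minimal sketch of the before/after shape, with the field set trimmed down from the patch:

package main

import "fmt"

// Before: package-level state, shared implicitly by every function.
//
//	var rootMode int
//	var forceUseModules bool
//
// After: the same values live on a State that callers construct and
// thread explicitly, so tests can use isolated instances.
type State struct {
	RootMode        int
	ForceUseModules bool
}

func NewState() *State { return &State{} }

func enabled(s *State) bool { return s.ForceUseModules || s.RootMode != 0 }

func main() {
	a, b := NewState(), NewState()
	a.ForceUseModules = true
	fmt.Println(enabled(a), enabled(b)) // true false: no shared globals
}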
var ( - initialized bool - - // These are primarily used to initialize the MainModules, and should be - // eventually superseded by them but are still used in cases where the module - // roots are required but MainModules hasn't been initialized yet. Set to - // the modRoots of the main modules. - // modRoots != nil implies len(modRoots) > 0 - modRoots []string - gopath string + gopath string ) // EnterModule resets MainModules and requirements to refer to just this one module. -func EnterModule(ctx context.Context, enterModroot string) { - MainModules = nil // reset MainModules - requirements = nil - workFilePath = "" // Force module mode +func EnterModule(loaderstate *State, ctx context.Context, enterModroot string) { + loaderstate.MainModules = nil // reset MainModules + loaderstate.requirements = nil + loaderstate.workFilePath = "" // Force module mode modfetch.Reset() - modRoots = []string{enterModroot} - LoadModFile(ctx) + loaderstate.modRoots = []string{enterModroot} + LoadModFile(loaderstate, ctx) } // EnterWorkspace enters workspace mode from module mode, applying the updated requirements to the main // module to that module in the workspace. There should be no calls to any of the exported // functions of the modload package running concurrently with a call to EnterWorkspace as // EnterWorkspace will modify the global state they depend on in a non-thread-safe way. -func EnterWorkspace(ctx context.Context) (exit func(), err error) { +func EnterWorkspace(loaderstate *State, ctx context.Context) (exit func(), err error) { // Find the identity of the main module that will be updated before we reset modload state. - mm := MainModules.mustGetSingleMainModule() + mm := loaderstate.MainModules.mustGetSingleMainModule(loaderstate) // Get the updated modfile we will use for that module. - _, _, updatedmodfile, err := UpdateGoModFromReqs(ctx, WriteOpts{}) + _, _, updatedmodfile, err := UpdateGoModFromReqs(loaderstate, ctx, WriteOpts{}) if err != nil { return nil, err } // Reset the state to a clean state. - oldstate := setState(state{}) - ForceUseModules = true + oldstate := loaderstate.setState(State{}) + loaderstate.ForceUseModules = true // Load in workspace mode. - InitWorkfile() - LoadModFile(ctx) + loaderstate.InitWorkfile() + LoadModFile(loaderstate, ctx) // Update the content of the previous main module, and recompute the requirements. - *MainModules.ModFile(mm) = *updatedmodfile - requirements = requirementsFromModFiles(ctx, MainModules.workFile, slices.Collect(maps.Values(MainModules.modFiles)), nil) + *loaderstate.MainModules.ModFile(mm) = *updatedmodfile + loaderstate.requirements = requirementsFromModFiles(loaderstate, ctx, loaderstate.MainModules.workFile, slices.Collect(maps.Values(loaderstate.MainModules.modFiles)), nil) return func() { - setState(oldstate) + loaderstate.setState(oldstate) }, nil } -// Variable set in InitWorkfile -var ( - // Set to the path to the go.work file, or "" if workspace mode is disabled. - workFilePath string -) - type MainModuleSet struct { // versions are the module.Version values of each of the main modules. 
// For each of them, the Path fields are ordinary module paths and the Version @@ -203,28 +180,36 @@ func (mms *MainModuleSet) InGorootSrc(m module.Version) bool { return mms.inGorootSrc[m] } -func (mms *MainModuleSet) mustGetSingleMainModule() module.Version { - if mms == nil || len(mms.versions) == 0 { - panic("internal error: mustGetSingleMainModule called in context with no main modules") +func (mms *MainModuleSet) mustGetSingleMainModule(loaderstate *State) module.Version { + mm, err := mms.getSingleMainModule(loaderstate) + if err != nil { + panic(err) } - if len(mms.versions) != 1 { - if inWorkspaceMode() { - panic("internal error: mustGetSingleMainModule called in workspace mode") - } else { - panic("internal error: multiple main modules present outside of workspace mode") - } - } - return mms.versions[0] + return mm } -func (mms *MainModuleSet) GetSingleIndexOrNil() *modFileIndex { +func (mms *MainModuleSet) getSingleMainModule(loaderstate *State) (module.Version, error) { + if mms == nil || len(mms.versions) == 0 { + return module.Version{}, errors.New("internal error: mustGetSingleMainModule called in context with no main modules") + } + if len(mms.versions) != 1 { + if loaderstate.inWorkspaceMode() { + return module.Version{}, errors.New("internal error: mustGetSingleMainModule called in workspace mode") + } else { + return module.Version{}, errors.New("internal error: multiple main modules present outside of workspace mode") + } + } + return mms.versions[0], nil +} + +func (mms *MainModuleSet) GetSingleIndexOrNil(loaderstate *State) *modFileIndex { if mms == nil { return nil } if len(mms.versions) == 0 { return nil } - return mms.indices[mms.mustGetSingleMainModule()] + return mms.indices[mms.mustGetSingleMainModule(loaderstate)] } func (mms *MainModuleSet) Index(m module.Version) *modFileIndex { @@ -267,12 +252,12 @@ func (mms *MainModuleSet) HighestReplaced() map[string]string { // GoVersion returns the go version set on the single module, in module mode, // or the go.work file in workspace mode. -func (mms *MainModuleSet) GoVersion() string { - if inWorkspaceMode() { +func (mms *MainModuleSet) GoVersion(loaderstate *State) string { + if loaderstate.inWorkspaceMode() { return gover.FromGoWork(mms.workFile) } if mms != nil && len(mms.versions) == 1 { - f := mms.ModFile(mms.mustGetSingleMainModule()) + f := mms.ModFile(mms.mustGetSingleMainModule(loaderstate)) if f == nil { // Special case: we are outside a module, like 'go run x.go'. // Assume the local Go version. @@ -287,15 +272,15 @@ func (mms *MainModuleSet) GoVersion() string { // Godebugs returns the godebug lines set on the single module, in module mode, // or on the go.work file in workspace mode. // The caller must not modify the result. -func (mms *MainModuleSet) Godebugs() []*modfile.Godebug { - if inWorkspaceMode() { +func (mms *MainModuleSet) Godebugs(loaderstate *State) []*modfile.Godebug { + if loaderstate.inWorkspaceMode() { if mms.workFile != nil { return mms.workFile.Godebug } return nil } if mms != nil && len(mms.versions) == 1 { - f := mms.ModFile(mms.mustGetSingleMainModule()) + f := mms.ModFile(mms.mustGetSingleMainModule(loaderstate)) if f == nil { // Special case: we are outside a module, like 'go run x.go'. return nil @@ -309,8 +294,6 @@ func (mms *MainModuleSet) WorkFileReplaceMap() map[module.Version]module.Version return mms.workFileReplaceMap } -var MainModules *MainModuleSet - type Root int const ( @@ -337,17 +320,17 @@ const ( // will be lost at the next call to WriteGoMod. 
// To make permanent changes to the require statements // in go.mod, edit it before loading. -func ModFile() *modfile.File { - Init() - modFile := MainModules.ModFile(MainModules.mustGetSingleMainModule()) +func ModFile(loaderstate *State) *modfile.File { + Init(loaderstate) + modFile := loaderstate.MainModules.ModFile(loaderstate.MainModules.mustGetSingleMainModule(loaderstate)) if modFile == nil { - die() + die(loaderstate) } return modFile } -func BinDir() string { - Init() +func BinDir(loaderstate *State) string { + Init(loaderstate) if cfg.GOBIN != "" { return cfg.GOBIN } @@ -360,13 +343,13 @@ func BinDir() string { // InitWorkfile initializes the workFilePath variable for commands that // operate in workspace mode. It should not be called by other commands, // for example 'go mod tidy', that don't operate in workspace mode. -func InitWorkfile() { +func (loaderstate *State) InitWorkfile() { // Initialize fsys early because we need overlay to read go.work file. fips140.Init() if err := fsys.Init(); err != nil { base.Fatal(err) } - workFilePath = FindGoWork(base.Cwd()) + loaderstate.workFilePath = loaderstate.FindGoWork(base.Cwd()) } // FindGoWork returns the name of the go.work file for this command, @@ -374,8 +357,8 @@ func InitWorkfile() { // Most code should use Init and Enabled rather than use this directly. // It is exported mainly for Go toolchain switching, which must process // the go.work very early at startup. -func FindGoWork(wd string) string { - if RootMode == NoRoot { +func (loaderstate *State) FindGoWork(wd string) string { + if loaderstate.RootMode == NoRoot { return "" } @@ -394,62 +377,90 @@ func FindGoWork(wd string) string { // WorkFilePath returns the absolute path of the go.work file, or "" if not in // workspace mode. WorkFilePath must be called after InitWorkfile. -func WorkFilePath() string { - return workFilePath +func WorkFilePath(loaderstate *State) string { + return loaderstate.workFilePath } // Reset clears all the initialized, cached state about the use of modules, // so that we can start over. -func Reset() { - setState(state{}) +func (s *State) Reset() { + s.setState(State{}) } -func setState(s state) state { - oldState := state{ - initialized: initialized, - forceUseModules: ForceUseModules, - rootMode: RootMode, - modRoots: modRoots, +func (s *State) setState(new State) State { + oldState := State{ + initialized: s.initialized, + ForceUseModules: s.ForceUseModules, + RootMode: s.RootMode, + modRoots: s.modRoots, modulesEnabled: cfg.ModulesEnabled, - mainModules: MainModules, - requirements: requirements, + MainModules: s.MainModules, + requirements: s.requirements, } - initialized = s.initialized - ForceUseModules = s.forceUseModules - RootMode = s.rootMode - modRoots = s.modRoots - cfg.ModulesEnabled = s.modulesEnabled - MainModules = s.mainModules - requirements = s.requirements - workFilePath = s.workFilePath + s.initialized = new.initialized + s.ForceUseModules = new.ForceUseModules + s.RootMode = new.RootMode + s.modRoots = new.modRoots + cfg.ModulesEnabled = new.modulesEnabled + s.MainModules = new.MainModules + s.requirements = new.requirements + s.workFilePath = new.workFilePath // The modfetch package's global state is used to compute // the go.sum file, so save and restore it along with the // modload state. 
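setState, shown in the next hunk, swaps an entire State in and hands the previous one back, which is what lets EnterWorkspace (earlier in this file) stash the old state and return an exit function that restores it. A minimal sketch of that swap-and-restore pattern, using a pared-down state type:

package main

import "fmt"

// state is a stand-in for modload.State: a bundle of formerly-global
// configuration that can be swapped wholesale.
type state struct {
	initialized bool
	workFile    string
}

// setState installs new and returns the previous state, so a caller can
// restore it later — the pattern EnterWorkspace uses for its exit func.
func (s *state) setState(new state) state {
	old := *s
	*s = new
	return old
}

func enterWorkspace(s *state) (exit func()) {
	old := s.setState(state{initialized: true, workFile: "go.work"})
	return func() { s.setState(old) }
}

func main() {
	var s state
	exit := enterWorkspace(&s)
	fmt.Printf("inside: %+v\n", s)
	exit()
	fmt.Printf("after:  %+v\n", s)
}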
- oldState.modfetchState = modfetch.SetState(s.modfetchState) + oldState.modfetchState = modfetch.SetState(new.modfetchState) return oldState } -type state struct { - initialized bool - forceUseModules bool - rootMode Root - modRoots []string - modulesEnabled bool - mainModules *MainModuleSet - requirements *Requirements - workFilePath string - modfetchState modfetch.State +type State struct { + initialized bool + allowMissingModuleImports bool + + // ForceUseModules may be set to force modules to be enabled when + // GO111MODULE=auto or to report an error when GO111MODULE=off. + ForceUseModules bool + + // RootMode determines whether a module root is needed. + RootMode Root + + // These are primarily used to initialize the MainModules, and should + // be eventually superseded by them but are still used in cases where + // the module roots are required but MainModules has not been + // initialized yet. Set to the modRoots of the main modules. + // modRoots != nil implies len(modRoots) > 0 + modRoots []string + modulesEnabled bool + MainModules *MainModuleSet + + // requirements is the requirement graph for the main module. + // + // It is always non-nil if the main module's go.mod file has been + // loaded. + // + // This variable should only be read from the loadModFile + // function, and should only be written in the loadModFile and + // commitRequirements functions. All other functions that need or + // produce a *Requirements should accept and/or return an explicit + // parameter. + requirements *Requirements + + // Set to the path to the go.work file, or "" if workspace mode is + // disabled + workFilePath string + modfetchState modfetch.State } +func NewState() *State { return &State{} } + // Init determines whether module mode is enabled, locates the root of the // current module (if any), sets environment variables for Git subprocesses, and // configures the cfg, codehost, load, modfetch, and search packages for use // with modules. -func Init() { - if initialized { +func Init(loaderstate *State) { + if loaderstate.initialized { return } - initialized = true + loaderstate.initialized = true fips140.Init() @@ -462,11 +473,11 @@ func Init() { default: base.Fatalf("go: unknown environment setting GO111MODULE=%s", env) case "auto": - mustUseModules = ForceUseModules + mustUseModules = loaderstate.ForceUseModules case "on", "": mustUseModules = true case "off": - if ForceUseModules { + if loaderstate.ForceUseModules { base.Fatalf("go: modules disabled by GO111MODULE=off; see 'go help modules'") } mustUseModules = false @@ -490,15 +501,15 @@ func Init() { if os.Getenv("GCM_INTERACTIVE") == "" { os.Setenv("GCM_INTERACTIVE", "never") } - if modRoots != nil { + if loaderstate.modRoots != nil { // modRoot set before Init was called ("go mod init" does this). // No need to search for go.mod. - } else if RootMode == NoRoot { + } else if loaderstate.RootMode == NoRoot { if cfg.ModFile != "" && !base.InGOFLAGS("-modfile") { base.Fatalf("go: -modfile cannot be used with commands that ignore the current module") } - modRoots = nil - } else if workFilePath != "" { + loaderstate.modRoots = nil + } else if loaderstate.workFilePath != "" { // We're in workspace mode, which implies module mode. 
if cfg.ModFile != "" { base.Fatalf("go: -modfile cannot be used in workspace mode") @@ -508,8 +519,8 @@ func Init() { if cfg.ModFile != "" { base.Fatalf("go: cannot find main module, but -modfile was set.\n\t-modfile cannot be used to set the module root directory.") } - if RootMode == NeedRoot { - base.Fatal(ErrNoModRoot) + if loaderstate.RootMode == NeedRoot { + base.Fatal(NewNoMainModulesError(loaderstate)) } if !mustUseModules { // GO111MODULE is 'auto', and we can't find a module root. @@ -523,14 +534,14 @@ func Init() { // It's a bit of a peculiar thing to disallow but quite mysterious // when it happens. See golang.org/issue/26708. fmt.Fprintf(os.Stderr, "go: warning: ignoring go.mod in system temp root %v\n", os.TempDir()) - if RootMode == NeedRoot { - base.Fatal(ErrNoModRoot) + if loaderstate.RootMode == NeedRoot { + base.Fatal(NewNoMainModulesError(loaderstate)) } if !mustUseModules { return } } else { - modRoots = []string{modRoot} + loaderstate.modRoots = []string{modRoot} } } if cfg.ModFile != "" && !strings.HasSuffix(cfg.ModFile, ".mod") { @@ -539,14 +550,14 @@ func Init() { // We're in module mode. Set any global variables that need to be set. cfg.ModulesEnabled = true - setDefaultBuildMod() + setDefaultBuildMod(loaderstate) list := filepath.SplitList(cfg.BuildContext.GOPATH) if len(list) > 0 && list[0] != "" { gopath = list[0] if _, err := fsys.Stat(filepath.Join(gopath, "go.mod")); err == nil { fmt.Fprintf(os.Stderr, "go: warning: ignoring go.mod in $GOPATH %v\n", gopath) - if RootMode == NeedRoot { - base.Fatal(ErrNoModRoot) + if loaderstate.RootMode == NeedRoot { + base.Fatal(NewNoMainModulesError(loaderstate)) } if !mustUseModules { return @@ -564,12 +575,12 @@ func Init() { // of 'go get', but Init reads the -modfile flag in 'go get', so it shouldn't // be called until the command is installed and flags are parsed. Instead of // calling Init and Enabled, the main package can call this function. -func WillBeEnabled() bool { - if modRoots != nil || cfg.ModulesEnabled { +func (loaderstate *State) WillBeEnabled() bool { + if loaderstate.modRoots != nil || cfg.ModulesEnabled { // Already enabled. return true } - if initialized { + if loaderstate.initialized { // Initialized, not enabled. return false } @@ -616,57 +627,77 @@ func FindGoMod(wd string) string { // If modules are enabled but there is no main module, Enabled returns true // and then the first use of module information will call die // (usually through MustModRoot). -func Enabled() bool { - Init() - return modRoots != nil || cfg.ModulesEnabled +func (loaderstate *State) Enabled() bool { + Init(loaderstate) + return loaderstate.modRoots != nil || cfg.ModulesEnabled } -func VendorDir() string { - if inWorkspaceMode() { - return filepath.Join(filepath.Dir(WorkFilePath()), "vendor") +func (s *State) vendorDir() (string, error) { + if s.inWorkspaceMode() { + return filepath.Join(filepath.Dir(WorkFilePath(s)), "vendor"), nil + } + mainModule, err := s.MainModules.getSingleMainModule(s) + if err != nil { + return "", err } // Even if -mod=vendor, we could be operating with no mod root (and thus no // vendor directory). As long as there are no dependencies that is expected // to work. See script/vendor_outside_module.txt. 
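The VendorDir rewrite in this region splits one panicking accessor into a fallible core plus thin wrappers that each pick a failure policy: vendorDir returns an error, VendorDirOrEmpty maps failure to "", and VendorDir keeps the old panic. A minimal sketch of that layering, with a simplified signature:

package main

import (
	"errors"
	"fmt"
	"path/filepath"
)

// vendorDir is the fallible core; the wrappers below choose a policy.
func vendorDir(modRoot string) (string, error) {
	if modRoot == "" {
		return "", errors.New("no module root: vendor directory does not exist")
	}
	return filepath.Join(modRoot, "vendor"), nil
}

// vendorDirOrEmpty swallows the error for callers that treat
// "no vendor directory" as an ordinary, expected condition.
func vendorDirOrEmpty(modRoot string) string {
	dir, err := vendorDir(modRoot)
	if err != nil {
		return ""
	}
	return dir
}

// mustVendorDir keeps the old panicking behavior for callers that
// consider a missing module root an internal error.
func mustVendorDir(modRoot string) string {
	dir, err := vendorDir(modRoot)
	if err != nil {
		panic(err)
	}
	return dir
}

func main() {
	fmt.Printf("%q\n", vendorDirOrEmpty(""))   // ""
	fmt.Println(mustVendorDir("/home/u/proj")) // /home/u/proj/vendor
}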
- modRoot := MainModules.ModRoot(MainModules.mustGetSingleMainModule()) + modRoot := s.MainModules.ModRoot(mainModule) if modRoot == "" { - panic("vendor directory does not exist when in single module mode outside of a module") + return "", errors.New("vendor directory does not exist when in single module mode outside of a module") } - return filepath.Join(modRoot, "vendor") + return filepath.Join(modRoot, "vendor"), nil } -func inWorkspaceMode() bool { - if !initialized { +func (s *State) VendorDirOrEmpty() string { + dir, err := s.vendorDir() + if err != nil { + return "" + } + return dir +} + +func VendorDir(loaderstate *State) string { + dir, err := loaderstate.vendorDir() + if err != nil { + panic(err) + } + return dir +} + +func (loaderstate *State) inWorkspaceMode() bool { + if !loaderstate.initialized { panic("inWorkspaceMode called before modload.Init called") } - if !Enabled() { + if !loaderstate.Enabled() { return false } - return workFilePath != "" + return loaderstate.workFilePath != "" } // HasModRoot reports whether a main module or main modules are present. // HasModRoot may return false even if Enabled returns true: for example, 'get' // does not require a main module. -func HasModRoot() bool { - Init() - return modRoots != nil +func (loaderstate *State) HasModRoot() bool { + Init(loaderstate) + return loaderstate.modRoots != nil } // MustHaveModRoot checks that a main module or main modules are present, // and calls base.Fatalf if there are no main modules. -func MustHaveModRoot() { - Init() - if !HasModRoot() { - die() +func (loaderstate *State) MustHaveModRoot() { + Init(loaderstate) + if !loaderstate.HasModRoot() { + die(loaderstate) } } // ModFilePath returns the path that would be used for the go.mod // file, if in module mode. ModFilePath calls base.Fatalf if there is no main // module, even if -modfile is set. -func ModFilePath() string { - MustHaveModRoot() +func (loaderstate *State) ModFilePath() string { + loaderstate.MustHaveModRoot() return modFilePath(findModuleRoot(base.Cwd())) } @@ -680,11 +711,11 @@ func modFilePath(modRoot string) string { return filepath.Join(modRoot, "go.mod") } -func die() { +func die(loaderstate *State) { if cfg.Getenv("GO111MODULE") == "off" { base.Fatalf("go: modules disabled by GO111MODULE=off; see 'go help modules'") } - if !inWorkspaceMode() { + if !loaderstate.inWorkspaceMode() { if dir, name := findAltConfig(base.Cwd()); dir != "" { rel, err := filepath.Rel(base.Cwd(), dir) if err != nil { @@ -697,21 +728,33 @@ func die() { base.Fatalf("go: cannot find main module, but found %s in %s\n\tto create a module there, run:\n\t%sgo mod init", name, dir, cdCmd) } } - base.Fatal(ErrNoModRoot) + base.Fatal(NewNoMainModulesError(loaderstate)) } +var ErrNoModRoot = errors.New("no module root") + // noMainModulesError returns the appropriate error if there is no main module or // main modules depending on whether the go command is in workspace mode. 
-type noMainModulesError struct{} +type noMainModulesError struct { + inWorkspaceMode bool +} func (e noMainModulesError) Error() string { - if inWorkspaceMode() { + if e.inWorkspaceMode { return "no modules were found in the current workspace; see 'go help work'" } return "go.mod file not found in current directory or any parent directory; see 'go help modules'" } -var ErrNoModRoot noMainModulesError +func (e noMainModulesError) Unwrap() error { + return ErrNoModRoot +} + +func NewNoMainModulesError(s *State) noMainModulesError { + return noMainModulesError{ + inWorkspaceMode: s.inWorkspaceMode(), + } +} type goModDirtyError struct{} @@ -862,33 +905,33 @@ func UpdateWorkFile(wf *modfile.WorkFile) { // other, but unlike LoadModGraph does not load the full module graph or check // it for global consistency. Most callers outside of the modload package should // use LoadModGraph instead. -func LoadModFile(ctx context.Context) *Requirements { - rs, err := loadModFile(ctx, nil) +func LoadModFile(loaderstate *State, ctx context.Context) *Requirements { + rs, err := loadModFile(loaderstate, ctx, nil) if err != nil { base.Fatal(err) } return rs } -func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) { - if requirements != nil { - return requirements, nil +func loadModFile(loaderstate *State, ctx context.Context, opts *PackageOpts) (*Requirements, error) { + if loaderstate.requirements != nil { + return loaderstate.requirements, nil } - Init() + Init(loaderstate) var workFile *modfile.WorkFile - if inWorkspaceMode() { + if loaderstate.inWorkspaceMode() { var err error - workFile, modRoots, err = LoadWorkFile(workFilePath) + workFile, loaderstate.modRoots, err = LoadWorkFile(loaderstate.workFilePath) if err != nil { return nil, err } - for _, modRoot := range modRoots { + for _, modRoot := range loaderstate.modRoots { sumFile := strings.TrimSuffix(modFilePath(modRoot), ".mod") + ".sum" - modfetch.WorkspaceGoSumFiles = append(modfetch.WorkspaceGoSumFiles, sumFile) + modfetch.ModuleFetchState.WorkspaceGoSumFiles = append(modfetch.ModuleFetchState.WorkspaceGoSumFiles, sumFile) } - modfetch.GoSumFile = workFilePath + ".sum" - } else if len(modRoots) == 0 { + modfetch.ModuleFetchState.GoSumFile = loaderstate.workFilePath + ".sum" + } else if len(loaderstate.modRoots) == 0 { // We're in module mode, but not inside a module. // // Commands like 'go build', 'go run', 'go list' have no go.mod file to @@ -907,25 +950,25 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) // // See golang.org/issue/32027. } else { - modfetch.GoSumFile = strings.TrimSuffix(modFilePath(modRoots[0]), ".mod") + ".sum" + modfetch.ModuleFetchState.GoSumFile = strings.TrimSuffix(modFilePath(loaderstate.modRoots[0]), ".mod") + ".sum" } - if len(modRoots) == 0 { + if len(loaderstate.modRoots) == 0 { // TODO(#49228): Instead of creating a fake module with an empty modroot, // make MainModules.Len() == 0 mean that we're in module mode but not inside // any module. 
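The noMainModulesError change above is why Error() in import.go now tests !errors.Is(e.QueryErr, ErrNoModRoot) instead of a direct != comparison: the concrete error is a struct carrying workspace-mode wording, and it identifies as the ErrNoModRoot sentinel only through Unwrap. A runnable sketch of sentinel wrapping, with simplified names:

package main

import (
	"errors"
	"fmt"
)

var errNoRoot = errors.New("no module root")

// detailedError carries mode-specific wording but still identifies as
// errNoRoot through Unwrap.
type detailedError struct{ workspace bool }

func (e detailedError) Error() string {
	if e.workspace {
		return "no modules were found in the current workspace"
	}
	return "go.mod file not found"
}

func (e detailedError) Unwrap() error { return errNoRoot }

func main() {
	var err error = detailedError{workspace: true}
	fmt.Println(err == errNoRoot)          // false: different concrete value
	fmt.Println(errors.Is(err, errNoRoot)) // true: Is follows Unwrap
}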
mainModule := module.Version{Path: "command-line-arguments"} - MainModules = makeMainModules([]module.Version{mainModule}, []string{""}, []*modfile.File{nil}, []*modFileIndex{nil}, nil) + loaderstate.MainModules = makeMainModules(loaderstate, []module.Version{mainModule}, []string{""}, []*modfile.File{nil}, []*modFileIndex{nil}, nil) var ( goVersion string pruning modPruning roots []module.Version direct = map[string]bool{"go": true} ) - if inWorkspaceMode() { + if loaderstate.inWorkspaceMode() { // Since we are in a workspace, the Go version for the synthetic // "command-line-arguments" module must not exceed the Go version // for the workspace. - goVersion = MainModules.GoVersion() + goVersion = loaderstate.MainModules.GoVersion(loaderstate) pruning = workspace roots = []module.Version{ mainModule, @@ -941,26 +984,26 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) } } rawGoVersion.Store(mainModule, goVersion) - requirements = newRequirements(pruning, roots, direct) + loaderstate.requirements = newRequirements(loaderstate, pruning, roots, direct) if cfg.BuildMod == "vendor" { // For issue 56536: Some users may have GOFLAGS=-mod=vendor set. // Make sure it behaves as though the fake module is vendored // with no dependencies. - requirements.initVendor(nil) + loaderstate.requirements.initVendor(loaderstate, nil) } - return requirements, nil + return loaderstate.requirements, nil } var modFiles []*modfile.File var mainModules []module.Version var indices []*modFileIndex var errs []error - for _, modroot := range modRoots { + for _, modroot := range loaderstate.modRoots { gomod := modFilePath(modroot) var fixed bool - data, f, err := ReadModFile(gomod, fixVersion(ctx, &fixed)) + data, f, err := ReadModFile(gomod, fixVersion(loaderstate, ctx, &fixed)) if err != nil { - if inWorkspaceMode() { + if loaderstate.inWorkspaceMode() { if tooNew, ok := err.(*gover.TooNewError); ok && !strings.HasPrefix(cfg.CmdName, "work ") { // Switching to a newer toolchain won't help - the go.work has the wrong version. // Report this more specific error, unless we are a command like 'go work use' @@ -975,7 +1018,7 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) errs = append(errs, err) continue } - if inWorkspaceMode() && !strings.HasPrefix(cfg.CmdName, "work ") { + if loaderstate.inWorkspaceMode() && !strings.HasPrefix(cfg.CmdName, "work ") { // Refuse to use workspace if its go version is too old. // Disable this check if we are a workspace command like work use or work sync, // which will fix the problem. @@ -987,7 +1030,7 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) } } - if !inWorkspaceMode() { + if !loaderstate.inWorkspaceMode() { ok := true for _, g := range f.Godebug { if err := CheckGodebug("godebug", g.Key, g.Value); err != nil { @@ -1016,45 +1059,45 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) return nil, errors.Join(errs...) 
} - MainModules = makeMainModules(mainModules, modRoots, modFiles, indices, workFile) - setDefaultBuildMod() // possibly enable automatic vendoring - rs := requirementsFromModFiles(ctx, workFile, modFiles, opts) + loaderstate.MainModules = makeMainModules(loaderstate, mainModules, loaderstate.modRoots, modFiles, indices, workFile) + setDefaultBuildMod(loaderstate) // possibly enable automatic vendoring + rs := requirementsFromModFiles(loaderstate, ctx, workFile, modFiles, opts) if cfg.BuildMod == "vendor" { - readVendorList(VendorDir()) - versions := MainModules.Versions() + readVendorList(VendorDir(loaderstate)) + versions := loaderstate.MainModules.Versions() indexes := make([]*modFileIndex, 0, len(versions)) modFiles := make([]*modfile.File, 0, len(versions)) modRoots := make([]string, 0, len(versions)) for _, m := range versions { - indexes = append(indexes, MainModules.Index(m)) - modFiles = append(modFiles, MainModules.ModFile(m)) - modRoots = append(modRoots, MainModules.ModRoot(m)) + indexes = append(indexes, loaderstate.MainModules.Index(m)) + modFiles = append(modFiles, loaderstate.MainModules.ModFile(m)) + modRoots = append(modRoots, loaderstate.MainModules.ModRoot(m)) } - checkVendorConsistency(indexes, modFiles, modRoots) - rs.initVendor(vendorList) + checkVendorConsistency(loaderstate, indexes, modFiles, modRoots) + rs.initVendor(loaderstate, vendorList) } - if inWorkspaceMode() { + if loaderstate.inWorkspaceMode() { // We don't need to update the mod file so return early. - requirements = rs + loaderstate.requirements = rs return rs, nil } - mainModule := MainModules.mustGetSingleMainModule() + mainModule := loaderstate.MainModules.mustGetSingleMainModule(loaderstate) - if rs.hasRedundantRoot() { + if rs.hasRedundantRoot(loaderstate) { // If any module path appears more than once in the roots, we know that the // go.mod file needs to be updated even though we have not yet loaded any // transitive dependencies. var err error - rs, err = updateRoots(ctx, rs.direct, rs, nil, nil, false) + rs, err = updateRoots(loaderstate, ctx, rs.direct, rs, nil, nil, false) if err != nil { return nil, err } } - if MainModules.Index(mainModule).goVersion == "" && rs.pruning != workspace { + if loaderstate.MainModules.Index(mainModule).goVersion == "" && rs.pruning != workspace { // TODO(#45551): Do something more principled instead of checking // cfg.CmdName directly here. if cfg.BuildMod == "mod" && cfg.CmdName != "mod graph" && cfg.CmdName != "mod why" { @@ -1063,8 +1106,8 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) if opts != nil && opts.TidyGoVersion != "" { v = opts.TidyGoVersion } - addGoStmt(MainModules.ModFile(mainModule), mainModule, v) - rs = overrideRoots(ctx, rs, []module.Version{{Path: "go", Version: v}}) + addGoStmt(loaderstate.MainModules.ModFile(mainModule), mainModule, v) + rs = overrideRoots(loaderstate, ctx, rs, []module.Version{{Path: "go", Version: v}}) // We need to add a 'go' version to the go.mod file, but we must assume // that its existing contents match something between Go 1.11 and 1.16. @@ -1073,7 +1116,7 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) // requirements to support pruning. 
if gover.Compare(v, gover.ExplicitIndirectVersion) >= 0 { var err error - rs, err = convertPruning(ctx, rs, pruned) + rs, err = convertPruning(loaderstate, ctx, rs, pruned) if err != nil { return nil, err } @@ -1083,8 +1126,8 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) } } - requirements = rs - return requirements, nil + loaderstate.requirements = rs + return loaderstate.requirements, nil } func errWorkTooOld(gomod string, wf *modfile.WorkFile, goVers string) error { @@ -1117,10 +1160,10 @@ func CheckReservedModulePath(path string) error { // translate it to go.mod directives. The resulting build list may not be // exactly the same as in the legacy configuration (for example, we can't get // packages at multiple versions from the same module). -func CreateModFile(ctx context.Context, modPath string) { +func CreateModFile(loaderstate *State, ctx context.Context, modPath string) { modRoot := base.Cwd() - modRoots = []string{modRoot} - Init() + loaderstate.modRoots = []string{modRoot} + Init(loaderstate) modFilePath := modFilePath(modRoot) if _, err := fsys.Stat(modFilePath); err == nil { base.Fatalf("go: %s already exists", modFilePath) @@ -1156,16 +1199,16 @@ func CreateModFile(ctx context.Context, modPath string) { fmt.Fprintf(os.Stderr, "go: creating new go.mod: module %s\n", modPath) modFile := new(modfile.File) modFile.AddModuleStmt(modPath) - MainModules = makeMainModules([]module.Version{modFile.Module.Mod}, []string{modRoot}, []*modfile.File{modFile}, []*modFileIndex{nil}, nil) + loaderstate.MainModules = makeMainModules(loaderstate, []module.Version{modFile.Module.Mod}, []string{modRoot}, []*modfile.File{modFile}, []*modFileIndex{nil}, nil) addGoStmt(modFile, modFile.Module.Mod, gover.Local()) // Add the go directive before converted module requirements. - rs := requirementsFromModFiles(ctx, nil, []*modfile.File{modFile}, nil) - rs, err := updateRoots(ctx, rs.direct, rs, nil, nil, false) + rs := requirementsFromModFiles(loaderstate, ctx, nil, []*modfile.File{modFile}, nil) + rs, err := updateRoots(loaderstate, ctx, rs.direct, rs, nil, nil, false) if err != nil { base.Fatal(err) } - requirements = rs - if err := commitRequirements(ctx, WriteOpts{}); err != nil { + loaderstate.requirements = rs + if err := commitRequirements(loaderstate, ctx, WriteOpts{}); err != nil { base.Fatal(err) } @@ -1200,7 +1243,7 @@ func CreateModFile(ctx context.Context, modPath string) { // and does nothing for versions that already appear to be canonical. // // The VersionFixer sets 'fixed' if it ever returns a non-canonical version. -func fixVersion(ctx context.Context, fixed *bool) modfile.VersionFixer { +func fixVersion(loaderstate *State, ctx context.Context, fixed *bool) modfile.VersionFixer { return func(path, vers string) (resolved string, err error) { defer func() { if err == nil && resolved != vers { @@ -1233,7 +1276,7 @@ func fixVersion(ctx context.Context, fixed *bool) modfile.VersionFixer { return vers, nil } - info, err := Query(ctx, path, vers, "", nil) + info, err := Query(loaderstate, ctx, path, vers, "", nil) if err != nil { return "", err } @@ -1248,16 +1291,16 @@ func fixVersion(ctx context.Context, fixed *bool) modfile.VersionFixer { // // This function affects the default cfg.BuildMod when outside of a module, // so it can only be called prior to Init. 
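AllowMissingModuleImports, diffed just below, keeps its ordering contract — it must run before Init, which derives defaults such as the effective -mod value from the flag — and enforces that contract with a panic. A minimal sketch of this call-before-init guard:

package main

import "fmt"

type state struct {
	initialized  bool
	allowMissing bool
}

func (s *state) init() { s.initialized = true }

// allowMissingModuleImports must run before init, because init derives
// other defaults from this flag. Enforcing the ordering with a panic
// turns a silent misconfiguration into an immediate, loud failure.
func (s *state) allowMissingModuleImports() {
	if s.initialized {
		panic("AllowMissingModuleImports after Init")
	}
	s.allowMissing = true
}

func main() {
	s := &state{}
	s.allowMissingModuleImports() // ok: before init
	s.init()
	fmt.Println(s.allowMissing) // true
}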
-func AllowMissingModuleImports() { - if initialized { +func (s *State) AllowMissingModuleImports() { + if s.initialized { panic("AllowMissingModuleImports after Init") } - allowMissingModuleImports = true + s.allowMissingModuleImports = true } // makeMainModules creates a MainModuleSet and associated variables according to // the given main modules. -func makeMainModules(ms []module.Version, rootDirs []string, modFiles []*modfile.File, indices []*modFileIndex, workFile *modfile.WorkFile) *MainModuleSet { +func makeMainModules(loaderstate *State, ms []module.Version, rootDirs []string, modFiles []*modfile.File, indices []*modFileIndex, workFile *modfile.WorkFile) *MainModuleSet { for _, m := range ms { if m.Version != "" { panic("mainModulesCalled with module.Version with non empty Version field: " + fmt.Sprintf("%#v", m)) @@ -1332,7 +1375,7 @@ func makeMainModules(ms []module.Version, rootDirs []string, modFiles []*modfile continue } var newV module.Version = r.New - if WorkFilePath() != "" && newV.Version == "" && !filepath.IsAbs(newV.Path) { + if WorkFilePath(loaderstate) != "" && newV.Version == "" && !filepath.IsAbs(newV.Path) { // Since we are in a workspace, we may be loading replacements from // multiple go.mod files. Relative paths in those replacement are // relative to the go.mod file, not the workspace, so the same string @@ -1374,14 +1417,14 @@ func makeMainModules(ms []module.Version, rootDirs []string, modFiles []*modfile // requirementsFromModFiles returns the set of non-excluded requirements from // the global modFile. -func requirementsFromModFiles(ctx context.Context, workFile *modfile.WorkFile, modFiles []*modfile.File, opts *PackageOpts) *Requirements { +func requirementsFromModFiles(loaderstate *State, ctx context.Context, workFile *modfile.WorkFile, modFiles []*modfile.File, opts *PackageOpts) *Requirements { var roots []module.Version direct := map[string]bool{} var pruning modPruning - if inWorkspaceMode() { + if loaderstate.inWorkspaceMode() { pruning = workspace - roots = make([]module.Version, len(MainModules.Versions()), 2+len(MainModules.Versions())) - copy(roots, MainModules.Versions()) + roots = make([]module.Version, len(loaderstate.MainModules.Versions()), 2+len(loaderstate.MainModules.Versions())) + copy(roots, loaderstate.MainModules.Versions()) goVersion := gover.FromGoWork(workFile) var toolchain string if workFile.Toolchain != nil { @@ -1390,16 +1433,16 @@ func requirementsFromModFiles(ctx context.Context, workFile *modfile.WorkFile, m roots = appendGoAndToolchainRoots(roots, goVersion, toolchain, direct) direct = directRequirements(modFiles) } else { - pruning = pruningForGoVersion(MainModules.GoVersion()) + pruning = pruningForGoVersion(loaderstate.MainModules.GoVersion(loaderstate)) if len(modFiles) != 1 { panic(fmt.Errorf("requirementsFromModFiles called with %v modfiles outside workspace mode", len(modFiles))) } modFile := modFiles[0] - roots, direct = rootsFromModFile(MainModules.mustGetSingleMainModule(), modFile, withToolchainRoot) + roots, direct = rootsFromModFile(loaderstate, loaderstate.MainModules.mustGetSingleMainModule(loaderstate), modFile, withToolchainRoot) } gover.ModSort(roots) - rs := newRequirements(pruning, roots, direct) + rs := newRequirements(loaderstate, pruning, roots, direct) return rs } @@ -1422,7 +1465,7 @@ func directRequirements(modFiles []*modfile.File) map[string]bool { return direct } -func rootsFromModFile(m module.Version, modFile *modfile.File, addToolchainRoot addToolchainRoot) (roots []module.Version, direct 
map[string]bool) { +func rootsFromModFile(loaderstate *State, m module.Version, modFile *modfile.File, addToolchainRoot addToolchainRoot) (roots []module.Version, direct map[string]bool) { direct = make(map[string]bool) padding := 2 // Add padding for the toolchain and go version, added upon return. if !addToolchainRoot { @@ -1430,7 +1473,7 @@ func rootsFromModFile(m module.Version, modFile *modfile.File, addToolchainRoot } roots = make([]module.Version, 0, padding+len(modFile.Require)) for _, r := range modFile.Require { - if index := MainModules.Index(m); index != nil && index.exclude[r.Mod] { + if index := loaderstate.MainModules.Index(m); index != nil && index.exclude[r.Mod] { if cfg.BuildMod == "mod" { fmt.Fprintf(os.Stderr, "go: dropping requirement on excluded version %s %s\n", r.Mod.Path, r.Mod.Version) } else { @@ -1471,9 +1514,9 @@ func appendGoAndToolchainRoots(roots []module.Version, goVersion, toolchain stri // setDefaultBuildMod sets a default value for cfg.BuildMod if the -mod flag // wasn't provided. setDefaultBuildMod may be called multiple times. -func setDefaultBuildMod() { +func setDefaultBuildMod(loaderstate *State) { if cfg.BuildModExplicit { - if inWorkspaceMode() && cfg.BuildMod != "readonly" && cfg.BuildMod != "vendor" { + if loaderstate.inWorkspaceMode() && cfg.BuildMod != "readonly" && cfg.BuildMod != "vendor" { switch cfg.CmdName { case "work sync", "mod graph", "mod verify", "mod why": // These commands run with BuildMod set to mod, but they don't take the @@ -1508,8 +1551,8 @@ func setDefaultBuildMod() { cfg.BuildMod = "readonly" return } - if modRoots == nil { - if allowMissingModuleImports { + if loaderstate.modRoots == nil { + if loaderstate.allowMissingModuleImports { cfg.BuildMod = "mod" } else { cfg.BuildMod = "readonly" @@ -1517,29 +1560,29 @@ func setDefaultBuildMod() { return } - if len(modRoots) >= 1 { + if len(loaderstate.modRoots) >= 1 { var goVersion string var versionSource string - if inWorkspaceMode() { + if loaderstate.inWorkspaceMode() { versionSource = "go.work" - if wfg := MainModules.WorkFile().Go; wfg != nil { + if wfg := loaderstate.MainModules.WorkFile().Go; wfg != nil { goVersion = wfg.Version } } else { versionSource = "go.mod" - index := MainModules.GetSingleIndexOrNil() + index := loaderstate.MainModules.GetSingleIndexOrNil(loaderstate) if index != nil { goVersion = index.goVersion } } vendorDir := "" - if workFilePath != "" { - vendorDir = filepath.Join(filepath.Dir(workFilePath), "vendor") + if loaderstate.workFilePath != "" { + vendorDir = filepath.Join(filepath.Dir(loaderstate.workFilePath), "vendor") } else { - if len(modRoots) != 1 { - panic(fmt.Errorf("outside workspace mode, but have %v modRoots", modRoots)) + if len(loaderstate.modRoots) != 1 { + panic(fmt.Errorf("outside workspace mode, but have %v modRoots", loaderstate.modRoots)) } - vendorDir = filepath.Join(modRoots[0], "vendor") + vendorDir = filepath.Join(loaderstate.modRoots[0], "vendor") } if fi, err := fsys.Stat(vendorDir); err == nil && fi.IsDir() { if goVersion != "" { @@ -1607,8 +1650,8 @@ func modulesTextIsForWorkspace(vendorDir string) (bool, error) { return false, nil } -func mustHaveCompleteRequirements() bool { - return cfg.BuildMod != "mod" && !inWorkspaceMode() +func mustHaveCompleteRequirements(loaderstate *State) bool { + return cfg.BuildMod != "mod" && !loaderstate.inWorkspaceMode() } // addGoStmt adds a go directive to the go.mod file if it does not already @@ -1802,22 +1845,22 @@ type WriteOpts struct { } // WriteGoMod writes the current build list 
back to go.mod. -func WriteGoMod(ctx context.Context, opts WriteOpts) error { - requirements = LoadModFile(ctx) - return commitRequirements(ctx, opts) +func WriteGoMod(loaderstate *State, ctx context.Context, opts WriteOpts) error { + loaderstate.requirements = LoadModFile(loaderstate, ctx) + return commitRequirements(loaderstate, ctx, opts) } var errNoChange = errors.New("no update needed") // UpdateGoModFromReqs returns a modified go.mod file using the current // requirements. It does not commit these changes to disk. -func UpdateGoModFromReqs(ctx context.Context, opts WriteOpts) (before, after []byte, modFile *modfile.File, err error) { - if MainModules.Len() != 1 || MainModules.ModRoot(MainModules.Versions()[0]) == "" { +func UpdateGoModFromReqs(loaderstate *State, ctx context.Context, opts WriteOpts) (before, after []byte, modFile *modfile.File, err error) { + if loaderstate.MainModules.Len() != 1 || loaderstate.MainModules.ModRoot(loaderstate.MainModules.Versions()[0]) == "" { // We aren't in a module, so we don't have anywhere to write a go.mod file. return nil, nil, nil, errNoChange } - mainModule := MainModules.mustGetSingleMainModule() - modFile = MainModules.ModFile(mainModule) + mainModule := loaderstate.MainModules.mustGetSingleMainModule(loaderstate) + modFile = loaderstate.MainModules.ModFile(mainModule) if modFile == nil { // command-line-arguments has no .mod file to write. return nil, nil, nil, errNoChange @@ -1830,7 +1873,7 @@ func UpdateGoModFromReqs(ctx context.Context, opts WriteOpts) (before, after []b var list []*modfile.Require toolchain := "" goVersion := "" - for _, m := range requirements.rootModules { + for _, m := range loaderstate.requirements.rootModules { if m.Path == "go" { goVersion = m.Version continue @@ -1841,7 +1884,7 @@ func UpdateGoModFromReqs(ctx context.Context, opts WriteOpts) (before, after []b } list = append(list, &modfile.Require{ Mod: m, - Indirect: !requirements.direct[m.Path], + Indirect: !loaderstate.requirements.direct[m.Path], }) } @@ -1911,13 +1954,13 @@ func UpdateGoModFromReqs(ctx context.Context, opts WriteOpts) (before, after []b // go.mod or go.sum are out of date in a semantically significant way. // // In workspace mode, commitRequirements only writes changes to go.work.sum. -func commitRequirements(ctx context.Context, opts WriteOpts) (err error) { - if inWorkspaceMode() { +func commitRequirements(loaderstate *State, ctx context.Context, opts WriteOpts) (err error) { + if loaderstate.inWorkspaceMode() { // go.mod files aren't updated in workspace mode, but we still want to // update the go.work.sum file. 
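UpdateGoModFromReqs reports the benign "nothing to write" case through the errNoChange sentinel declared above, and commitRequirements (continuing below) maps that sentinel back to a nil error. A small sketch of treating such a sentinel as a successful outcome rather than a failure:

package main

import (
	"errors"
	"fmt"
)

// errNoChange reports a benign outcome: there is nothing to write.
// Returning a sentinel (rather than, say, a bool) lets intermediate
// layers pass it through ordinary error plumbing.
var errNoChange = errors.New("no update needed")

func updateFile(dirty bool) ([]byte, error) {
	if !dirty {
		return nil, errNoChange
	}
	return []byte("new contents"), nil
}

func commit(dirty bool) error {
	data, err := updateFile(dirty)
	if err != nil {
		if errors.Is(err, errNoChange) {
			return nil // nothing to do is success, not failure
		}
		return err
	}
	fmt.Printf("writing %d bytes\n", len(data))
	return nil
}

func main() {
	fmt.Println(commit(false)) // <nil>, without writing
	fmt.Println(commit(true))  // writes, then <nil>
}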
- return modfetch.WriteGoSum(ctx, keepSums(ctx, loaded, requirements, addBuildListZipSums), mustHaveCompleteRequirements()) + return modfetch.WriteGoSum(ctx, keepSums(loaderstate, ctx, loaded, loaderstate.requirements, addBuildListZipSums), mustHaveCompleteRequirements(loaderstate)) } - _, updatedGoMod, modFile, err := UpdateGoModFromReqs(ctx, opts) + _, updatedGoMod, modFile, err := UpdateGoModFromReqs(loaderstate, ctx, opts) if err != nil { if errors.Is(err, errNoChange) { return nil @@ -1925,7 +1968,7 @@ func commitRequirements(ctx context.Context, opts WriteOpts) (err error) { return err } - index := MainModules.GetSingleIndexOrNil() + index := loaderstate.MainModules.GetSingleIndexOrNil(loaderstate) dirty := index.modFileIsDirty(modFile) || len(opts.DropTools) > 0 || len(opts.AddTools) > 0 if dirty && cfg.BuildMod != "mod" { // If we're about to fail due to -mod=readonly, @@ -1939,15 +1982,15 @@ func commitRequirements(ctx context.Context, opts WriteOpts) (err error) { // Don't write go.mod, but write go.sum in case we added or trimmed sums. // 'go mod init' shouldn't write go.sum, since it will be incomplete. if cfg.CmdName != "mod init" { - if err := modfetch.WriteGoSum(ctx, keepSums(ctx, loaded, requirements, addBuildListZipSums), mustHaveCompleteRequirements()); err != nil { + if err := modfetch.WriteGoSum(ctx, keepSums(loaderstate, ctx, loaded, loaderstate.requirements, addBuildListZipSums), mustHaveCompleteRequirements(loaderstate)); err != nil { return err } } return nil } - mainModule := MainModules.mustGetSingleMainModule() - modFilePath := modFilePath(MainModules.ModRoot(mainModule)) + mainModule := loaderstate.MainModules.mustGetSingleMainModule(loaderstate) + modFilePath := modFilePath(loaderstate.MainModules.ModRoot(mainModule)) if fsys.Replaced(modFilePath) { if dirty { return errors.New("updates to go.mod needed, but go.mod is part of the overlay specified with -overlay") @@ -1956,13 +1999,13 @@ func commitRequirements(ctx context.Context, opts WriteOpts) (err error) { } defer func() { // At this point we have determined to make the go.mod file on disk equal to new. - MainModules.SetIndex(mainModule, indexModFile(updatedGoMod, modFile, mainModule, false)) + loaderstate.MainModules.SetIndex(mainModule, indexModFile(updatedGoMod, modFile, mainModule, false)) // Update go.sum after releasing the side lock and refreshing the index. // 'go mod init' shouldn't write go.sum, since it will be incomplete. if cfg.CmdName != "mod init" { if err == nil { - err = modfetch.WriteGoSum(ctx, keepSums(ctx, loaded, requirements, addBuildListZipSums), mustHaveCompleteRequirements()) + err = modfetch.WriteGoSum(ctx, keepSums(loaderstate, ctx, loaded, loaderstate.requirements, addBuildListZipSums), mustHaveCompleteRequirements(loaderstate)) } } }() @@ -2005,7 +2048,7 @@ func commitRequirements(ctx context.Context, opts WriteOpts) (err error) { // including any go.mod files needed to reconstruct the MVS result // or identify go versions, // in addition to the checksums for every module in keepMods. 
-func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums) map[module.Version]bool { +func keepSums(loaderstate *State, ctx context.Context, ld *loader, rs *Requirements, which whichSums) map[module.Version]bool { // Every module in the full module graph contributes its requirements, // so in order to ensure that the build list itself is reproducible, // we need sums for every go.mod in the graph (regardless of whether @@ -2018,12 +2061,12 @@ func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums // ambiguous import errors the next time we load the package. keepModSumsForZipSums := true if ld == nil { - if gover.Compare(MainModules.GoVersion(), gover.TidyGoModSumVersion) < 0 && cfg.BuildMod != "mod" { + if gover.Compare(loaderstate.MainModules.GoVersion(loaderstate), gover.TidyGoModSumVersion) < 0 && cfg.BuildMod != "mod" { keepModSumsForZipSums = false } } else { keepPkgGoModSums := true - if gover.Compare(ld.requirements.GoVersion(), gover.TidyGoModSumVersion) < 0 && (ld.Tidy || cfg.BuildMod != "mod") { + if gover.Compare(ld.requirements.GoVersion(loaderstate), gover.TidyGoModSumVersion) < 0 && (ld.Tidy || cfg.BuildMod != "mod") { keepPkgGoModSums = false keepModSumsForZipSums = false } @@ -2041,21 +2084,21 @@ func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums // minor, so we maintain the previous (buggy) behavior in 'go mod tidy' to // avoid introducing unnecessary churn. if keepPkgGoModSums { - r := resolveReplacement(pkg.mod) + r := resolveReplacement(loaderstate, pkg.mod) keep[modkey(r)] = true } if rs.pruning == pruned && pkg.mod.Path != "" { - if v, ok := rs.rootSelected(pkg.mod.Path); ok && v == pkg.mod.Version { + if v, ok := rs.rootSelected(loaderstate, pkg.mod.Path); ok && v == pkg.mod.Version { // pkg was loaded from a root module, and because the main module has // a pruned module graph we do not check non-root modules for // conflicts for packages that can be found in roots. So we only need // the checksums for the root modules that may contain pkg, not all // possible modules. for prefix := pkg.path; prefix != "."; prefix = path.Dir(prefix) { - if v, ok := rs.rootSelected(prefix); ok && v != "none" { + if v, ok := rs.rootSelected(loaderstate, prefix); ok && v != "none" { m := module.Version{Path: prefix, Version: v} - r := resolveReplacement(m) + r := resolveReplacement(loaderstate, m) keep[r] = true } } @@ -2063,11 +2106,11 @@ func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums } } - mg, _ := rs.Graph(ctx) + mg, _ := rs.Graph(loaderstate, ctx) for prefix := pkg.path; prefix != "."; prefix = path.Dir(prefix) { if v := mg.Selected(prefix); v != "none" { m := module.Version{Path: prefix, Version: v} - r := resolveReplacement(m) + r := resolveReplacement(loaderstate, m) keep[r] = true } } @@ -2079,27 +2122,27 @@ func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums // Save sums for the root modules (or their replacements), but don't // incur the cost of loading the graph just to find and retain the sums. 
for _, m := range rs.rootModules { - r := resolveReplacement(m) + r := resolveReplacement(loaderstate, m) keep[modkey(r)] = true if which == addBuildListZipSums { keep[r] = true } } } else { - mg, _ := rs.Graph(ctx) + mg, _ := rs.Graph(loaderstate, ctx) mg.WalkBreadthFirst(func(m module.Version) { if _, ok := mg.RequiredBy(m); ok { // The requirements from m's go.mod file are present in the module graph, // so they are relevant to the MVS result regardless of whether m was // actually selected. - r := resolveReplacement(m) + r := resolveReplacement(loaderstate, m) keep[modkey(r)] = true } }) if which == addBuildListZipSums { for _, m := range mg.BuildList() { - r := resolveReplacement(m) + r := resolveReplacement(loaderstate, m) if keepModSumsForZipSums { keep[modkey(r)] = true // we need the go version from the go.mod file to do anything useful with the zipfile } @@ -2199,9 +2242,12 @@ func CheckGodebug(verb, k, v string) error { } return nil } - for _, info := range godebugs.All { - if k == info.Name { - return nil + if godebugs.Lookup(k) != nil { + return nil + } + for _, info := range godebugs.Removed { + if info.Name == k { + return fmt.Errorf("use of removed %s %q, see https://go.dev/doc/godebug#go-1%v", verb, k, info.Removed) } } return fmt.Errorf("unknown %s %q", verb, k) diff --git a/src/cmd/go/internal/modload/list.go b/src/cmd/go/internal/modload/list.go index 53cb6c2ffe1..316fda4003b 100644 --- a/src/cmd/go/internal/modload/list.go +++ b/src/cmd/go/internal/modload/list.go @@ -41,7 +41,7 @@ const ( // along with any error preventing additional matches from being identified. // // The returned slice can be nonempty even if the error is non-nil. -func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile string) ([]*modinfo.ModulePublic, error) { +func ListModules(loaderstate *State, ctx context.Context, args []string, mode ListMode, reuseFile string) ([]*modinfo.ModulePublic, error) { var reuse map[module.Version]*modinfo.ModulePublic if reuseFile != "" { data, err := os.ReadFile(reuseFile) @@ -69,7 +69,7 @@ func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile st } } - rs, mods, err := listModules(ctx, LoadModFile(ctx), args, mode, reuse) + rs, mods, err := listModules(loaderstate, ctx, LoadModFile(loaderstate, ctx), args, mode, reuse) type token struct{} sem := make(chan token, runtime.GOMAXPROCS(0)) @@ -82,16 +82,16 @@ func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile st sem <- token{} go func() { if mode&ListU != 0 { - addUpdate(ctx, m) + addUpdate(loaderstate, ctx, m) } if mode&ListVersions != 0 { - addVersions(ctx, m, mode&ListRetractedVersions != 0) + addVersions(loaderstate, ctx, m, mode&ListRetractedVersions != 0) } if mode&ListRetracted != 0 { - addRetraction(ctx, m) + addRetraction(loaderstate, ctx, m) } if mode&ListDeprecated != 0 { - addDeprecation(ctx, m) + addDeprecation(loaderstate, ctx, m) } <-sem }() @@ -109,7 +109,7 @@ func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile st } if err == nil { - requirements = rs + loaderstate.requirements = rs // TODO(#61605): The extra ListU clause fixes a problem with Go 1.21rc3 // where "go mod tidy" and "go list -m -u all" fight over whether the go.sum // should be considered up-to-date. 
The fix for now is to always treat the @@ -117,20 +117,20 @@ func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile st // but in general list -u is looking up other checksums in the checksum database // that won't be necessary later, so it makes sense not to write the go.sum back out. if !ExplicitWriteGoMod && mode&ListU == 0 { - err = commitRequirements(ctx, WriteOpts{}) + err = commitRequirements(loaderstate, ctx, WriteOpts{}) } } return mods, err } -func listModules(ctx context.Context, rs *Requirements, args []string, mode ListMode, reuse map[module.Version]*modinfo.ModulePublic) (_ *Requirements, mods []*modinfo.ModulePublic, mgErr error) { +func listModules(loaderstate *State, ctx context.Context, rs *Requirements, args []string, mode ListMode, reuse map[module.Version]*modinfo.ModulePublic) (_ *Requirements, mods []*modinfo.ModulePublic, mgErr error) { if len(args) == 0 { var ms []*modinfo.ModulePublic - for _, m := range MainModules.Versions() { + for _, m := range loaderstate.MainModules.Versions() { if gover.IsToolchain(m.Path) { continue } - ms = append(ms, moduleInfo(ctx, rs, m, mode, reuse)) + ms = append(ms, moduleInfo(loaderstate, ctx, rs, m, mode, reuse)) } return rs, ms, nil } @@ -145,33 +145,33 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List } if arg == "all" || strings.Contains(arg, "...") { needFullGraph = true - if !HasModRoot() { - base.Fatalf("go: cannot match %q: %v", arg, ErrNoModRoot) + if !loaderstate.HasModRoot() { + base.Fatalf("go: cannot match %q: %v", arg, NewNoMainModulesError(loaderstate)) } continue } if path, vers, found := strings.Cut(arg, "@"); found { if vers == "upgrade" || vers == "patch" { - if _, ok := rs.rootSelected(path); !ok || rs.pruning == unpruned { + if _, ok := rs.rootSelected(loaderstate, path); !ok || rs.pruning == unpruned { needFullGraph = true - if !HasModRoot() { - base.Fatalf("go: cannot match %q: %v", arg, ErrNoModRoot) + if !loaderstate.HasModRoot() { + base.Fatalf("go: cannot match %q: %v", arg, NewNoMainModulesError(loaderstate)) } } } continue } - if _, ok := rs.rootSelected(arg); !ok || rs.pruning == unpruned { + if _, ok := rs.rootSelected(loaderstate, arg); !ok || rs.pruning == unpruned { needFullGraph = true - if mode&ListVersions == 0 && !HasModRoot() { - base.Fatalf("go: cannot match %q without -versions or an explicit version: %v", arg, ErrNoModRoot) + if mode&ListVersions == 0 && !loaderstate.HasModRoot() { + base.Fatalf("go: cannot match %q without -versions or an explicit version: %v", arg, NewNoMainModulesError(loaderstate)) } } } var mg *ModuleGraph if needFullGraph { - rs, mg, mgErr = expandGraph(ctx, rs) + rs, mg, mgErr = expandGraph(loaderstate, ctx, rs) } matchedModule := map[module.Version]bool{} @@ -179,7 +179,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List if path, vers, found := strings.Cut(arg, "@"); found { var current string if mg == nil { - current, _ = rs.rootSelected(path) + current, _ = rs.rootSelected(loaderstate, path) } else { current = mg.Selected(path) } @@ -192,13 +192,13 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List } } - allowed := CheckAllowed + allowed := loaderstate.CheckAllowed if IsRevisionQuery(path, vers) || mode&ListRetracted != 0 { // Allow excluded and retracted versions if the user asked for a // specific revision or used 'go list -retracted'. 
allowed = nil } - info, err := queryReuse(ctx, path, vers, current, allowed, reuse) + info, err := queryReuse(loaderstate, ctx, path, vers, current, allowed, reuse) if err != nil { var origin *codehost.Origin if info != nil { @@ -217,7 +217,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List // *Requirements instead. var noRS *Requirements - mod := moduleInfo(ctx, noRS, module.Version{Path: path, Version: info.Version}, mode, reuse) + mod := moduleInfo(loaderstate, ctx, noRS, module.Version{Path: path, Version: info.Version}, mode, reuse) if vers != mod.Version { mod.Query = vers } @@ -237,7 +237,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List var v string if mg == nil { var ok bool - v, ok = rs.rootSelected(arg) + v, ok = rs.rootSelected(loaderstate, arg) if !ok { // We checked rootSelected(arg) in the earlier args loop, so if there // is no such root we should have loaded a non-nil mg. @@ -251,7 +251,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List continue } if v != "none" { - mods = append(mods, moduleInfo(ctx, rs, module.Version{Path: arg, Version: v}, mode, reuse)) + mods = append(mods, moduleInfo(loaderstate, ctx, rs, module.Version{Path: arg, Version: v}, mode, reuse)) } else if cfg.BuildMod == "vendor" { // In vendor mode, we can't determine whether a missing module is “a // known dependency” because the module graph is incomplete. @@ -292,7 +292,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List fetchedMods := make([]*modinfo.ModulePublic, len(matches)) for i, m := range matches { q.Add(func() { - fetchedMods[i] = moduleInfo(ctx, rs, m, mode, reuse) + fetchedMods[i] = moduleInfo(loaderstate, ctx, rs, m, mode, reuse) }) } <-q.Idle() @@ -305,13 +305,11 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List // modinfoError wraps an error to create an error message in // modinfo.ModuleError with minimal redundancy. func modinfoError(path, vers string, err error) *modinfo.ModuleError { - var nerr *NoMatchingVersionError - var merr *module.ModuleError - if errors.As(err, &nerr) { + if _, ok := errors.AsType[*NoMatchingVersionError](err); ok { // NoMatchingVersionError contains the query, so we don't mention the // query again in ModuleError. err = &module.ModuleError{Path: path, Err: err} - } else if !errors.As(err, &merr) { + } else if _, ok := errors.AsType[*module.ModuleError](err); !ok { // If the error does not contain path and version, wrap it in a // module.ModuleError. err = &module.ModuleError{Path: path, Version: vers, Err: err} diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go index 8b2be3b300e..b4d128fe9a1 100644 --- a/src/cmd/go/internal/modload/load.go +++ b/src/cmd/go/internal/modload/load.go @@ -250,7 +250,7 @@ type PackageOpts struct { // LoadPackages identifies the set of packages matching the given patterns and // loads the packages in the import graph rooted at that set. 
-func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (matches []*search.Match, loadedPackages []string) { +func LoadPackages(loaderstate *State, ctx context.Context, opts PackageOpts, patterns ...string) (matches []*search.Match, loadedPackages []string) { if opts.Tags == nil { opts.Tags = imports.Tags() } @@ -271,11 +271,11 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma case m.IsLocal(): // Evaluate list of file system directories on first iteration. if m.Dirs == nil { - matchModRoots := modRoots + matchModRoots := loaderstate.modRoots if opts.MainModule != (module.Version{}) { - matchModRoots = []string{MainModules.ModRoot(opts.MainModule)} + matchModRoots = []string{loaderstate.MainModules.ModRoot(opts.MainModule)} } - matchLocalDirs(ctx, matchModRoots, m, rs) + matchLocalDirs(loaderstate, ctx, matchModRoots, m, rs) } // Make a copy of the directory list and translate to import paths. @@ -286,7 +286,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // the loader iterations. m.Pkgs = m.Pkgs[:0] for _, dir := range m.Dirs { - pkg, err := resolveLocalPackage(ctx, dir, rs) + pkg, err := resolveLocalPackage(loaderstate, ctx, dir, rs) if err != nil { if !m.IsLiteral() && (err == errPkgIsBuiltin || err == errPkgIsGorootSrc) { continue // Don't include "builtin" or GOROOT/src in wildcard patterns. @@ -294,8 +294,8 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // If we're outside of a module, ensure that the failure mode // indicates that. - if !HasModRoot() { - die() + if !loaderstate.HasModRoot() { + die(loaderstate) } if ld != nil { @@ -311,7 +311,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma case strings.Contains(m.Pattern(), "..."): m.Errs = m.Errs[:0] - mg, err := rs.Graph(ctx) + mg, err := rs.Graph(loaderstate, ctx) if err != nil { // The module graph is (or may be) incomplete — perhaps we failed to // load the requirements of some module. This is an error in matching @@ -321,26 +321,26 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // necessarily prevent us from loading the packages we could find. m.Errs = append(m.Errs, err) } - matchPackages(ctx, m, opts.Tags, includeStd, mg.BuildList()) + matchPackages(loaderstate, ctx, m, opts.Tags, includeStd, mg.BuildList()) case m.Pattern() == "work": - matchModules := MainModules.Versions() + matchModules := loaderstate.MainModules.Versions() if opts.MainModule != (module.Version{}) { matchModules = []module.Version{opts.MainModule} } - matchPackages(ctx, m, opts.Tags, omitStd, matchModules) + matchPackages(loaderstate, ctx, m, opts.Tags, omitStd, matchModules) case m.Pattern() == "all": if ld == nil { // The initial roots are the packages and tools in the main module. // loadFromRoots will expand that to "all". 
m.Errs = m.Errs[:0] - matchModules := MainModules.Versions() + matchModules := loaderstate.MainModules.Versions() if opts.MainModule != (module.Version{}) { matchModules = []module.Version{opts.MainModule} } - matchPackages(ctx, m, opts.Tags, omitStd, matchModules) - for tool := range MainModules.Tools() { + matchPackages(loaderstate, ctx, m, opts.Tags, omitStd, matchModules) + for tool := range loaderstate.MainModules.Tools() { m.Pkgs = append(m.Pkgs, tool) } } else { @@ -355,7 +355,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma } case m.Pattern() == "tool": - for tool := range MainModules.Tools() { + for tool := range loaderstate.MainModules.Tools() { m.Pkgs = append(m.Pkgs, tool) } default: @@ -364,12 +364,12 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma } } - initialRS, err := loadModFile(ctx, &opts) + initialRS, err := loadModFile(loaderstate, ctx, &opts) if err != nil { base.Fatal(err) } - ld := loadFromRoots(ctx, loaderParams{ + ld := loadFromRoots(loaderstate, ctx, loaderParams{ PackageOpts: opts, requirements: initialRS, @@ -404,7 +404,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma if opts.Tidy { if cfg.BuildV { - mg, _ := ld.requirements.Graph(ctx) + mg, _ := ld.requirements.Graph(loaderstate, ctx) for _, m := range initialRS.rootModules { var unused bool if ld.requirements.pruning == unpruned { @@ -416,7 +416,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // m is unused if it was dropped from the roots. If it is still present // as a transitive dependency, that transitive dependency is not needed // by any package or test in the main module. - _, ok := ld.requirements.rootSelected(m.Path) + _, ok := ld.requirements.rootSelected(loaderstate, m.Path) unused = !ok } if unused { @@ -425,9 +425,9 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma } } - keep := keepSums(ctx, ld, ld.requirements, loadedZipSumsOnly) + keep := keepSums(loaderstate, ctx, ld, ld.requirements, loadedZipSumsOnly) compatVersion := ld.TidyCompatibleVersion - goVersion := ld.requirements.GoVersion() + goVersion := ld.requirements.GoVersion(loaderstate) if compatVersion == "" { if gover.Compare(goVersion, gover.GoStrictVersion) < 0 { compatVersion = gover.Prev(goVersion) @@ -444,10 +444,10 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma compatVersion = goVersion } if compatPruning := pruningForGoVersion(compatVersion); compatPruning != ld.requirements.pruning { - compatRS := newRequirements(compatPruning, ld.requirements.rootModules, ld.requirements.direct) - ld.checkTidyCompatibility(ctx, compatRS, compatVersion) + compatRS := newRequirements(loaderstate, compatPruning, ld.requirements.rootModules, ld.requirements.direct) + ld.checkTidyCompatibility(loaderstate, ctx, compatRS, compatVersion) - for m := range keepSums(ctx, ld, compatRS, loadedZipSumsOnly) { + for m := range keepSums(loaderstate, ctx, ld, compatRS, loadedZipSumsOnly) { keep[m] = true } } @@ -455,8 +455,8 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma if opts.TidyDiff { cfg.BuildMod = "readonly" loaded = ld - requirements = loaded.requirements - currentGoMod, updatedGoMod, _, err := UpdateGoModFromReqs(ctx, WriteOpts{}) + loaderstate.requirements = loaded.requirements + currentGoMod, updatedGoMod, _, err := UpdateGoModFromReqs(loaderstate, ctx, WriteOpts{}) if err != nil { base.Fatal(err) } 
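// Illustrative sketch, not part of the patch: the TidyDiff path above pairs
// the current and tidied go.mod bytes returned by UpdateGoModFromReqs. Inside
// cmd/go/internal/modload a caller could surface that diff the same way the
// go.sum diff in the next hunk is built; tidyGoModDiff is a hypothetical name.
func tidyGoModDiff(loaderstate *State, ctx context.Context) ([]byte, error) {
	currentGoMod, updatedGoMod, _, err := UpdateGoModFromReqs(loaderstate, ctx, WriteOpts{})
	if err != nil {
		return nil, err // includes errNoChange when there is nothing to write
	}
	// Same helper and argument shape as diff.Diff("current/go.sum", ...) below.
	return diff.Diff("current/go.mod", currentGoMod, "tidy/go.mod", updatedGoMod), nil
}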
@@ -466,7 +466,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // Dropping compatibility for 1.16 may result in a strictly smaller go.sum. // Update the keep map with only the loaded.requirements. if gover.Compare(compatVersion, "1.16") > 0 { - keep = keepSums(ctx, loaded, requirements, addBuildListZipSums) + keep = keepSums(loaderstate, ctx, loaded, loaderstate.requirements, addBuildListZipSums) } currentGoSum, tidyGoSum := modfetch.TidyGoSum(keep) goSumDiff := diff.Diff("current/go.sum", currentGoSum, "tidy/go.sum", tidyGoSum) @@ -490,7 +490,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // loaded.requirements, but here we may have also loaded (and want to // preserve checksums for) additional entities from compatRS, which are // only needed for compatibility with ld.TidyCompatibleVersion. - if err := modfetch.WriteGoSum(ctx, keep, mustHaveCompleteRequirements()); err != nil { + if err := modfetch.WriteGoSum(ctx, keep, mustHaveCompleteRequirements(loaderstate)); err != nil { base.Fatal(err) } } @@ -505,7 +505,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // to call WriteGoMod itself) or if ResolveMissingImports is false (the // command wants to examine the package graph as-is). loaded = ld - requirements = loaded.requirements + loaderstate.requirements = loaded.requirements for _, pkg := range ld.pkgs { if !pkg.isTest() { @@ -515,7 +515,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma sort.Strings(loadedPackages) if !ExplicitWriteGoMod && opts.ResolveMissingImports { - if err := commitRequirements(ctx, WriteOpts{}); err != nil { + if err := commitRequirements(loaderstate, ctx, WriteOpts{}); err != nil { base.Fatal(err) } } @@ -525,7 +525,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // matchLocalDirs is like m.MatchDirs, but tries to avoid scanning directories // outside of the standard library and active modules. -func matchLocalDirs(ctx context.Context, modRoots []string, m *search.Match, rs *Requirements) { +func matchLocalDirs(loaderstate *State, ctx context.Context, modRoots []string, m *search.Match, rs *Requirements) { if !m.IsLocal() { panic(fmt.Sprintf("internal error: resolveLocalDirs on non-local pattern %s", m.Pattern())) } @@ -543,10 +543,10 @@ func matchLocalDirs(ctx context.Context, modRoots []string, m *search.Match, rs } modRoot := findModuleRoot(absDir) - if !slices.Contains(modRoots, modRoot) && search.InDir(absDir, cfg.GOROOTsrc) == "" && pathInModuleCache(ctx, absDir, rs) == "" { + if !slices.Contains(modRoots, modRoot) && search.InDir(absDir, cfg.GOROOTsrc) == "" && pathInModuleCache(loaderstate, ctx, absDir, rs) == "" { m.Dirs = []string{} scope := "main module or its selected dependencies" - if inWorkspaceMode() { + if loaderstate.inWorkspaceMode() { scope = "modules listed in go.work or their selected dependencies" } m.AddError(fmt.Errorf("directory prefix %s does not contain %s", base.ShortPath(absDir), scope)) @@ -558,7 +558,7 @@ func matchLocalDirs(ctx context.Context, modRoots []string, m *search.Match, rs } // resolveLocalPackage resolves a filesystem path to a package path. 
-func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (string, error) { +func resolveLocalPackage(loaderstate *State, ctx context.Context, dir string, rs *Requirements) (string, error) { var absDir string if filepath.IsAbs(dir) { absDir = filepath.Clean(dir) @@ -596,13 +596,13 @@ func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (str } } - for _, mod := range MainModules.Versions() { - modRoot := MainModules.ModRoot(mod) + for _, mod := range loaderstate.MainModules.Versions() { + modRoot := loaderstate.MainModules.ModRoot(mod) if modRoot != "" && absDir == modRoot { if absDir == cfg.GOROOTsrc { return "", errPkgIsGorootSrc } - return MainModules.PathPrefix(mod), nil + return loaderstate.MainModules.PathPrefix(mod), nil } } @@ -611,8 +611,8 @@ func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (str // It's not strictly necessary but helpful to keep the checks. var pkgNotFoundErr error pkgNotFoundLongestPrefix := "" - for _, mainModule := range MainModules.Versions() { - modRoot := MainModules.ModRoot(mainModule) + for _, mainModule := range loaderstate.MainModules.Versions() { + modRoot := loaderstate.MainModules.ModRoot(mainModule) if modRoot != "" && str.HasFilePathPrefix(absDir, modRoot) && !strings.Contains(absDir[len(modRoot):], "@") { suffix := filepath.ToSlash(str.TrimFilePathPrefix(absDir, modRoot)) if pkg, found := strings.CutPrefix(suffix, "vendor/"); found { @@ -620,14 +620,14 @@ func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (str return "", fmt.Errorf("without -mod=vendor, directory %s has no package path", absDir) } - readVendorList(VendorDir()) + readVendorList(VendorDir(loaderstate)) if _, ok := vendorPkgModule[pkg]; !ok { return "", fmt.Errorf("directory %s is not a package listed in vendor/modules.txt", absDir) } return pkg, nil } - mainModulePrefix := MainModules.PathPrefix(mainModule) + mainModulePrefix := loaderstate.MainModules.PathPrefix(mainModule) if mainModulePrefix == "" { pkg := suffix if pkg == "builtin" { @@ -668,13 +668,13 @@ func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (str return pkg, nil } - pkg := pathInModuleCache(ctx, absDir, rs) + pkg := pathInModuleCache(loaderstate, ctx, absDir, rs) if pkg == "" { dirstr := fmt.Sprintf("directory %s", base.ShortPath(absDir)) if dirstr == "directory ." { dirstr = "current directory" } - if inWorkspaceMode() { + if loaderstate.inWorkspaceMode() { if mr := findModuleRoot(absDir); mr != "" { return "", fmt.Errorf("%s is contained in a module that is not one of the workspace modules listed in go.work. You can add the module to the workspace using:\n\tgo work use %s", dirstr, base.ShortPath(mr)) } @@ -693,17 +693,17 @@ var ( // pathInModuleCache returns the import path of the directory dir, // if dir is in the module cache copy of a module in our build list. 
-func pathInModuleCache(ctx context.Context, dir string, rs *Requirements) string { +func pathInModuleCache(loaderstate *State, ctx context.Context, dir string, rs *Requirements) string { tryMod := func(m module.Version) (string, bool) { if gover.IsToolchain(m.Path) { return "", false } var root string var err error - if repl := Replacement(m); repl.Path != "" && repl.Version == "" { + if repl := Replacement(loaderstate, m); repl.Path != "" && repl.Version == "" { root = repl.Path if !filepath.IsAbs(root) { - root = filepath.Join(replaceRelativeTo(), root) + root = filepath.Join(replaceRelativeTo(loaderstate), root) } } else if repl.Path != "" { root, err = modfetch.DownloadDir(ctx, repl) @@ -728,7 +728,7 @@ func pathInModuleCache(ctx context.Context, dir string, rs *Requirements) string if rs.pruning == pruned { for _, m := range rs.rootModules { - if v, _ := rs.rootSelected(m.Path); v != m.Version { + if v, _ := rs.rootSelected(loaderstate, m.Path); v != m.Version { continue // m is a root, but we have a higher root for the same path. } if importPath, ok := tryMod(m); ok { @@ -747,7 +747,7 @@ func pathInModuleCache(ctx context.Context, dir string, rs *Requirements) string // versions of root modules may differ from what we already checked above. // Re-check those paths too. - mg, _ := rs.Graph(ctx) + mg, _ := rs.Graph(loaderstate, ctx) var importPath string for _, m := range mg.BuildList() { var found bool @@ -766,8 +766,8 @@ func pathInModuleCache(ctx context.Context, dir string, rs *Requirements) string // // TODO(bcmills): Silencing errors seems off. Take a closer look at this and // figure out what the error-reporting actually ought to be. -func ImportFromFiles(ctx context.Context, gofiles []string) { - rs := LoadModFile(ctx) +func ImportFromFiles(loaderstate *State, ctx context.Context, gofiles []string) { + rs := LoadModFile(loaderstate, ctx) tags := imports.Tags() imports, testImports, err := imports.ScanFiles(gofiles, tags) @@ -775,7 +775,7 @@ func ImportFromFiles(ctx context.Context, gofiles []string) { base.Fatal(err) } - loaded = loadFromRoots(ctx, loaderParams{ + loaded = loadFromRoots(loaderstate, ctx, loaderParams{ PackageOpts: PackageOpts{ Tags: tags, ResolveMissingImports: true, @@ -788,10 +788,10 @@ func ImportFromFiles(ctx context.Context, gofiles []string) { return roots }, }) - requirements = loaded.requirements + loaderstate.requirements = loaded.requirements if !ExplicitWriteGoMod { - if err := commitRequirements(ctx, WriteOpts{}); err != nil { + if err := commitRequirements(loaderstate, ctx, WriteOpts{}); err != nil { base.Fatal(err) } } @@ -799,11 +799,11 @@ func ImportFromFiles(ctx context.Context, gofiles []string) { // DirImportPath returns the effective import path for dir, // provided it is within a main module, or else returns ".". -func (mms *MainModuleSet) DirImportPath(ctx context.Context, dir string) (path string, m module.Version) { - if !HasModRoot() { +func (mms *MainModuleSet) DirImportPath(loaderstate *State, ctx context.Context, dir string) (path string, m module.Version) { + if !loaderstate.HasModRoot() { return ".", module.Version{} } - LoadModFile(ctx) // Sets targetPrefix. + LoadModFile(loaderstate, ctx) // Sets targetPrefix. 
if !filepath.IsAbs(dir) { dir = filepath.Join(base.Cwd(), dir) @@ -820,7 +820,7 @@ func (mms *MainModuleSet) DirImportPath(ctx context.Context, dir string) (path s return mms.PathPrefix(v), v } if str.HasFilePathPrefix(dir, modRoot) { - pathPrefix := MainModules.PathPrefix(v) + pathPrefix := loaderstate.MainModules.PathPrefix(v) if pathPrefix > longestPrefix { longestPrefix = pathPrefix longestPrefixVersion = v @@ -853,13 +853,13 @@ func PackageModule(path string) module.Version { // the package at path as imported from the package in parentDir. // Lookup requires that one of the Load functions in this package has already // been called. -func Lookup(parentPath string, parentIsStd bool, path string) (dir, realPath string, err error) { +func Lookup(loaderstate *State, parentPath string, parentIsStd bool, path string) (dir, realPath string, err error) { if path == "" { panic("Lookup called with empty package path") } if parentIsStd { - path = loaded.stdVendor(parentPath, path) + path = loaded.stdVendor(loaderstate, parentPath, path) } pkg, ok := loaded.pkgCache.Get(path) if !ok { @@ -957,11 +957,11 @@ func (ld *loader) exitIfErrors(ctx context.Context) { // goVersion reports the Go version that should be used for the loader's // requirements: ld.TidyGoVersion if set, or ld.requirements.GoVersion() // otherwise. -func (ld *loader) goVersion() string { +func (ld *loader) goVersion(loaderstate *State) string { if ld.TidyGoVersion != "" { return ld.TidyGoVersion } - return ld.requirements.GoVersion() + return ld.requirements.GoVersion(loaderstate) } // A loadPkg records information about a single loaded package. @@ -1064,11 +1064,11 @@ func (pkg *loadPkg) isTest() bool { // fromExternalModule reports whether pkg was loaded from a module other than // the main module. -func (pkg *loadPkg) fromExternalModule() bool { +func (pkg *loadPkg) fromExternalModule(loaderstate *State) bool { if pkg.mod.Path == "" { return false // loaded from the standard library, not a module } - return !MainModules.Contains(pkg.mod.Path) + return !loaderstate.MainModules.Contains(pkg.mod.Path) } var errMissing = errors.New("cannot find package") @@ -1079,7 +1079,7 @@ var errMissing = errors.New("cannot find package") // The set of root packages is returned by the params.listRoots function, and // expanded to the full set of packages by tracing imports (and possibly tests) // as needed. -func loadFromRoots(ctx context.Context, params loaderParams) *loader { +func loadFromRoots(loaderstate *State, ctx context.Context, params loaderParams) *loader { ld := &loader{ loaderParams: params, work: par.NewQueue(runtime.GOMAXPROCS(0)), @@ -1095,7 +1095,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { // spot-checks in modules that do not maintain the expanded go.mod // requirements needed for graph pruning. 
var err error - ld.requirements, _, err = expandGraph(ctx, ld.requirements) + ld.requirements, _, err = expandGraph(loaderstate, ctx, ld.requirements) if err != nil { ld.error(err) } @@ -1103,11 +1103,11 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { ld.exitIfErrors(ctx) updateGoVersion := func() { - goVersion := ld.goVersion() + goVersion := ld.goVersion(loaderstate) if ld.requirements.pruning != workspace { var err error - ld.requirements, err = convertPruning(ctx, ld.requirements, pruningForGoVersion(goVersion)) + ld.requirements, err = convertPruning(loaderstate, ctx, ld.requirements, pruningForGoVersion(goVersion)) if err != nil { ld.error(err) ld.exitIfErrors(ctx) @@ -1141,7 +1141,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { // set of root packages does not change then we can select the correct // versions of all transitive imports on the first try and complete // loading in a single iteration. - changedBuildList := ld.preloadRootModules(ctx, rootPkgs) + changedBuildList := ld.preloadRootModules(loaderstate, ctx, rootPkgs) if changedBuildList { // The build list has changed, so the set of root packages may have also // changed. Start over to pick up the changes. (Preloading roots is much @@ -1154,7 +1154,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { inRoots := map[*loadPkg]bool{} for _, path := range rootPkgs { - root := ld.pkg(ctx, path, pkgIsRoot) + root := ld.pkg(loaderstate, ctx, path, pkgIsRoot) if !inRoots[root] { ld.roots = append(ld.roots, root) inRoots[root] = true @@ -1170,7 +1170,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { ld.buildStacks() - changed, err := ld.updateRequirements(ctx) + changed, err := ld.updateRequirements(loaderstate, ctx) if err != nil { ld.error(err) break @@ -1184,12 +1184,12 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { continue } - if !ld.ResolveMissingImports || (!HasModRoot() && !allowMissingModuleImports) { + if !ld.ResolveMissingImports || (!loaderstate.HasModRoot() && !loaderstate.allowMissingModuleImports) { // We've loaded as much as we can without resolving missing imports. break } - modAddedBy, err := ld.resolveMissingImports(ctx) + modAddedBy, err := ld.resolveMissingImports(loaderstate, ctx) if err != nil { ld.error(err) break @@ -1216,7 +1216,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { // iteration so we don't need to also update it here. (That would waste time // computing a "direct" map that we'll have to recompute later anyway.) direct := ld.requirements.direct - rs, err := updateRoots(ctx, direct, ld.requirements, noPkgs, toAdd, ld.AssumeRootsImported) + rs, err := updateRoots(loaderstate, ctx, direct, ld.requirements, noPkgs, toAdd, ld.AssumeRootsImported) if err != nil { // If an error was found in a newly added module, report the package // import stack instead of the module requirement stack. Packages @@ -1244,7 +1244,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { // Tidy the build list, if applicable, before we report errors. // (The process of tidying may remove errors from irrelevant dependencies.) if ld.Tidy { - rs, err := tidyRoots(ctx, ld.requirements, ld.pkgs) + rs, err := tidyRoots(loaderstate, ctx, ld.requirements, ld.pkgs) if err != nil { ld.error(err) } else { @@ -1252,8 +1252,8 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { // Attempt to switch to the requested Go version. 
We have been using its // pruning and semantics all along, but there may have been — and may // still be — requirements on higher versions in the graph. - tidy := overrideRoots(ctx, rs, []module.Version{{Path: "go", Version: ld.TidyGoVersion}}) - mg, err := tidy.Graph(ctx) + tidy := overrideRoots(loaderstate, ctx, rs, []module.Version{{Path: "go", Version: ld.TidyGoVersion}}) + mg, err := tidy.Graph(loaderstate, ctx) if err != nil { ld.error(err) } @@ -1285,7 +1285,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { if m.Path == "go" && ld.TidyGoVersion != "" { continue } - if v, ok := ld.requirements.rootSelected(m.Path); !ok || v != m.Version { + if v, ok := ld.requirements.rootSelected(loaderstate, m.Path); !ok || v != m.Version { ld.error(fmt.Errorf("internal error: a requirement on %v is needed but was not added during package loading (selected %s)", m, v)) } } @@ -1304,7 +1304,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { } // Add importer information to checksum errors. - if sumErr := (*ImportMissingSumError)(nil); errors.As(pkg.err, &sumErr) { + if sumErr, ok := errors.AsType[*ImportMissingSumError](pkg.err); ok { if importer := pkg.stack; importer != nil { sumErr.importer = importer.path sumErr.importerVersion = importer.mod.Version @@ -1312,7 +1312,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { } } - if stdErr := (*ImportMissingError)(nil); errors.As(pkg.err, &stdErr) && stdErr.isStd { + if stdErr, ok := errors.AsType[*ImportMissingError](pkg.err); ok && stdErr.isStd { // Add importer go version information to import errors of standard // library packages arising from newer releases. if importer := pkg.stack; importer != nil { @@ -1334,7 +1334,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { ld.error(fmt.Errorf("%s: %w", pkg.stackText(), pkg.err)) } - ld.checkMultiplePaths() + ld.checkMultiplePaths(loaderstate) return ld } @@ -1357,7 +1357,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { // The "changed" return value reports whether the update changed the selected // version of any module that either provided a loaded package or may now // provide a package that was previously unresolved. -func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err error) { +func (ld *loader) updateRequirements(loaderstate *State, ctx context.Context) (changed bool, err error) { rs := ld.requirements // direct contains the set of modules believed to provide packages directly @@ -1384,22 +1384,22 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err var maxTooNew *gover.TooNewError for _, pkg := range ld.pkgs { if pkg.err != nil { - if tooNew := (*gover.TooNewError)(nil); errors.As(pkg.err, &tooNew) { + if tooNew, ok := errors.AsType[*gover.TooNewError](pkg.err); ok { if maxTooNew == nil || gover.Compare(tooNew.GoVersion, maxTooNew.GoVersion) > 0 { maxTooNew = tooNew } } } - if pkg.mod.Version != "" || !MainModules.Contains(pkg.mod.Path) { + if pkg.mod.Version != "" || !loaderstate.MainModules.Contains(pkg.mod.Path) { continue } for _, dep := range pkg.imports { - if !dep.fromExternalModule() { + if !dep.fromExternalModule(loaderstate) { continue } - if inWorkspaceMode() { + if loaderstate.inWorkspaceMode() { // In workspace mode / workspace pruning mode, the roots are the main modules // rather than the main module's direct dependencies. The check below on the selected // roots does not apply. 
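// Illustrative sketch, not part of the patch: the tidy go-version override
// used in loadFromRoots above, written as a standalone helper with the
// explicitly threaded state. Assumes the modload package scope;
// pinTidyGoVersion is a hypothetical name.
func pinTidyGoVersion(loaderstate *State, ctx context.Context, rs *Requirements, goVers string) (*Requirements, *ModuleGraph, error) {
	// overrideRoots forces the "go" root to the requested version; the module
	// graph is then recomputed under that root, as in the TidyGoVersion branch.
	tidy := overrideRoots(loaderstate, ctx, rs, []module.Version{{Path: "go", Version: goVers}})
	mg, err := tidy.Graph(loaderstate, ctx)
	return tidy, mg, err
}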
@@ -1412,7 +1412,7 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err // of the vendor directory anyway. continue } - if mg, err := rs.Graph(ctx); err != nil { + if mg, err := rs.Graph(loaderstate, ctx); err != nil { return false, err } else if _, ok := mg.RequiredBy(dep.mod); !ok { // dep.mod is not an explicit dependency, but needs to be. @@ -1424,7 +1424,7 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err } } } else if pkg.err == nil && cfg.BuildMod != "mod" { - if v, ok := rs.rootSelected(dep.mod.Path); !ok || v != dep.mod.Version { + if v, ok := rs.rootSelected(loaderstate, dep.mod.Path); !ok || v != dep.mod.Version { // dep.mod is not an explicit dependency, but needs to be. // Because we are not in "mod" mode, we will not be able to update it. // Instead, mark the importing package with an error. @@ -1490,21 +1490,21 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err // roots can only increase and the set of roots can only expand. The set // of extant root paths is finite and the set of versions of each path is // finite, so the iteration *must* reach a stable fixed-point. - tidy, err := tidyRoots(ctx, rs, ld.pkgs) + tidy, err := tidyRoots(loaderstate, ctx, rs, ld.pkgs) if err != nil { return false, err } addRoots = tidy.rootModules } - rs, err = updateRoots(ctx, direct, rs, ld.pkgs, addRoots, ld.AssumeRootsImported) + rs, err = updateRoots(loaderstate, ctx, direct, rs, ld.pkgs, addRoots, ld.AssumeRootsImported) if err != nil { // We don't actually know what even the root requirements are supposed to be, // so we can't proceed with loading. Return the error to the caller return false, err } - if rs.GoVersion() != ld.requirements.GoVersion() { + if rs.GoVersion(loaderstate) != ld.requirements.GoVersion(loaderstate) { // A change in the selected Go version may or may not affect the set of // loaded packages, but in some cases it can change the meaning of the "all" // pattern, the level of pruning in the module graph, and even the set of @@ -1515,12 +1515,12 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err // The roots of the module graph have changed in some way (not just the // "direct" markings). Check whether the changes affected any of the loaded // packages. - mg, err := rs.Graph(ctx) + mg, err := rs.Graph(loaderstate, ctx) if err != nil { return false, err } for _, pkg := range ld.pkgs { - if pkg.fromExternalModule() && mg.Selected(pkg.mod.Path) != pkg.mod.Version { + if pkg.fromExternalModule(loaderstate) && mg.Selected(pkg.mod.Path) != pkg.mod.Version { changed = true break } @@ -1540,7 +1540,7 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err // // In some sense, we can think of this as ‘upgraded the module providing // pkg.path from "none" to a version higher than "none"’. - if _, _, _, _, err = importFromModules(ctx, pkg.path, rs, nil, ld.skipImportModFiles); err == nil { + if _, _, _, _, err = importFromModules(loaderstate, ctx, pkg.path, rs, nil, ld.skipImportModFiles); err == nil { changed = true break } @@ -1558,7 +1558,7 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err // The newly-resolved packages are added to the addedModuleFor map, and // resolveMissingImports returns a map from each new module version to // the first missing package that module would resolve. 
-func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[module.Version]*loadPkg, err error) { +func (ld *loader) resolveMissingImports(loaderstate *State, ctx context.Context) (modAddedBy map[module.Version]*loadPkg, err error) { type pkgMod struct { pkg *loadPkg mod *module.Version @@ -1573,7 +1573,7 @@ func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[mod // we should only add the missing import once. continue } - if !errors.As(pkg.err, new(*ImportMissingError)) { + if _, ok := errors.AsType[*ImportMissingError](pkg.err); !ok { // Leave other errors for Import or load.Packages to report. continue } @@ -1582,13 +1582,13 @@ func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[mod var mod module.Version ld.work.Add(func() { var err error - mod, err = queryImport(ctx, pkg.path, ld.requirements) + mod, err = queryImport(loaderstate, ctx, pkg.path, ld.requirements) if err != nil { - var ime *ImportMissingError - if errors.As(err, &ime) { + if ime, ok := errors.AsType[*ImportMissingError](err); ok { for curstack := pkg.stack; curstack != nil; curstack = curstack.stack { - if MainModules.Contains(curstack.mod.Path) { + if loaderstate.MainModules.Contains(curstack.mod.Path) { ime.ImportingMainModule = curstack.mod + ime.modRoot = loaderstate.MainModules.ModRoot(ime.ImportingMainModule) break } } @@ -1625,7 +1625,7 @@ func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[mod maxTooNewPkg *loadPkg ) for _, pm := range pkgMods { - if tooNew := (*gover.TooNewError)(nil); errors.As(pm.pkg.err, &tooNew) { + if tooNew, ok := errors.AsType[*gover.TooNewError](pm.pkg.err); ok { if maxTooNew == nil || gover.Compare(tooNew.GoVersion, maxTooNew.GoVersion) > 0 { maxTooNew = tooNew maxTooNewPkg = pm.pkg @@ -1659,7 +1659,7 @@ func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[mod // ld.work queue, and its test (if requested) will also be populated once // imports have been resolved. When ld.work goes idle, all transitive imports of // the requested package (and its test, if requested) will have been loaded. -func (ld *loader) pkg(ctx context.Context, path string, flags loadPkgFlags) *loadPkg { +func (ld *loader) pkg(loaderstate *State, ctx context.Context, path string, flags loadPkgFlags) *loadPkg { if flags.has(pkgImportsLoaded) { panic("internal error: (*loader).pkg called with pkgImportsLoaded flag set") } @@ -1668,20 +1668,20 @@ func (ld *loader) pkg(ctx context.Context, path string, flags loadPkgFlags) *loa pkg := &loadPkg{ path: path, } - ld.applyPkgFlags(ctx, pkg, flags) + ld.applyPkgFlags(loaderstate, ctx, pkg, flags) - ld.work.Add(func() { ld.load(ctx, pkg) }) + ld.work.Add(func() { ld.load(loaderstate, ctx, pkg) }) return pkg }) - ld.applyPkgFlags(ctx, pkg, flags) + ld.applyPkgFlags(loaderstate, ctx, pkg, flags) return pkg } // applyPkgFlags updates pkg.flags to set the given flags and propagate the // (transitive) effects of those flags, possibly loading or enqueueing further // packages as a result. -func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkgFlags) { +func (ld *loader) applyPkgFlags(loaderstate *State, ctx context.Context, pkg *loadPkg, flags loadPkgFlags) { if flags == 0 { return } @@ -1709,7 +1709,7 @@ func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkg // so it's ok if we call it more than is strictly necessary. 
wantTest := false switch { - case ld.allPatternIsRoot && MainModules.Contains(pkg.mod.Path): + case ld.allPatternIsRoot && loaderstate.MainModules.Contains(pkg.mod.Path): // We are loading the "all" pattern, which includes packages imported by // tests in the main module. This package is in the main module, so we // need to identify the imports of its test even if LoadTests is not set. @@ -1730,13 +1730,13 @@ func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkg if wantTest { var testFlags loadPkgFlags - if MainModules.Contains(pkg.mod.Path) || (ld.allClosesOverTests && new.has(pkgInAll)) { + if loaderstate.MainModules.Contains(pkg.mod.Path) || (ld.allClosesOverTests && new.has(pkgInAll)) { // Tests of packages in the main module are in "all", in the sense that // they cause the packages they import to also be in "all". So are tests // of packages in "all" if "all" closes over test dependencies. testFlags |= pkgInAll } - ld.pkgTest(ctx, pkg, testFlags) + ld.pkgTest(loaderstate, ctx, pkg, testFlags) } } @@ -1744,13 +1744,13 @@ func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkg // We have just marked pkg with pkgInAll, or we have just loaded its // imports, or both. Now is the time to propagate pkgInAll to the imports. for _, dep := range pkg.imports { - ld.applyPkgFlags(ctx, dep, pkgInAll) + ld.applyPkgFlags(loaderstate, ctx, dep, pkgInAll) } } if new.has(pkgFromRoot) && !old.has(pkgFromRoot|pkgImportsLoaded) { for _, dep := range pkg.imports { - ld.applyPkgFlags(ctx, dep, pkgFromRoot) + ld.applyPkgFlags(loaderstate, ctx, dep, pkgFromRoot) } } } @@ -1758,7 +1758,7 @@ func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkg // preloadRootModules loads the module requirements needed to identify the // selected version of each module providing a package in rootPkgs, // adding new root modules to the module graph if needed. -func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (changedBuildList bool) { +func (ld *loader) preloadRootModules(loaderstate *State, ctx context.Context, rootPkgs []string) (changedBuildList bool) { needc := make(chan map[module.Version]bool, 1) needc <- map[module.Version]bool{} for _, path := range rootPkgs { @@ -1769,13 +1769,12 @@ func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (ch // If the main module is tidy and the package is in "all" — or if we're // lucky — we can identify all of its imports without actually loading the // full module graph. - m, _, _, _, err := importFromModules(ctx, path, ld.requirements, nil, ld.skipImportModFiles) + m, _, _, _, err := importFromModules(loaderstate, ctx, path, ld.requirements, nil, ld.skipImportModFiles) if err != nil { - var missing *ImportMissingError - if errors.As(err, &missing) && ld.ResolveMissingImports { + if _, ok := errors.AsType[*ImportMissingError](err); ok && ld.ResolveMissingImports { // This package isn't provided by any selected module. // If we can find it, it will be a new root dependency. - m, err = queryImport(ctx, path, ld.requirements) + m, err = queryImport(loaderstate, ctx, path, ld.requirements) } if err != nil { // We couldn't identify the root module containing this package. 
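// Illustrative sketch, not part of the patch: the errors.As → errors.AsType
// rewrite this CL applies throughout. The generic form returns the typed
// error plus an ok bool, so the pointer out-parameter and its preceding var
// declaration disappear; classifyLoadErr is a hypothetical name.
func classifyLoadErr(err error) string {
	// Before: var missing *ImportMissingError; if errors.As(err, &missing) { ... }
	if missing, ok := errors.AsType[*ImportMissingError](err); ok {
		return "no module provides package " + missing.Path
	}
	if tooNew, ok := errors.AsType[*gover.TooNewError](err); ok {
		return "requires go >= " + tooNew.GoVersion
	}
	return err.Error()
}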
@@ -1788,7 +1787,7 @@ func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (ch return } - v, ok := ld.requirements.rootSelected(m.Path) + v, ok := ld.requirements.rootSelected(loaderstate, m.Path) if !ok || v != m.Version { // We found the requested package in m, but m is not a root, so // loadModGraph will not load its requirements. We need to promote the @@ -1816,7 +1815,7 @@ func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (ch } gover.ModSort(toAdd) - rs, err := updateRoots(ctx, ld.requirements.direct, ld.requirements, nil, toAdd, ld.AssumeRootsImported) + rs, err := updateRoots(loaderstate, ctx, ld.requirements.direct, ld.requirements, nil, toAdd, ld.AssumeRootsImported) if err != nil { // We are missing some root dependency, and for some reason we can't load // enough of the module dependency graph to add the missing root. Package @@ -1838,11 +1837,11 @@ func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (ch } // load loads an individual package. -func (ld *loader) load(ctx context.Context, pkg *loadPkg) { +func (ld *loader) load(loaderstate *State, ctx context.Context, pkg *loadPkg) { var mg *ModuleGraph if ld.requirements.pruning == unpruned { var err error - mg, err = ld.requirements.Graph(ctx) + mg, err = ld.requirements.Graph(loaderstate, ctx) if err != nil { // We already checked the error from Graph in loadFromRoots and/or // updateRequirements, so we ignored the error on purpose and we should @@ -1857,17 +1856,17 @@ func (ld *loader) load(ctx context.Context, pkg *loadPkg) { } var modroot string - pkg.mod, modroot, pkg.dir, pkg.altMods, pkg.err = importFromModules(ctx, pkg.path, ld.requirements, mg, ld.skipImportModFiles) - if MainModules.Tools()[pkg.path] { + pkg.mod, modroot, pkg.dir, pkg.altMods, pkg.err = importFromModules(loaderstate, ctx, pkg.path, ld.requirements, mg, ld.skipImportModFiles) + if loaderstate.MainModules.Tools()[pkg.path] { // Tools declared by main modules are always in "all". // We apply the package flags before returning so that missing // tool dependencies report an error https://go.dev/issue/70582 - ld.applyPkgFlags(ctx, pkg, pkgInAll) + ld.applyPkgFlags(loaderstate, ctx, pkg, pkgInAll) } if pkg.dir == "" { return } - if MainModules.Contains(pkg.mod.Path) { + if loaderstate.MainModules.Contains(pkg.mod.Path) { // Go ahead and mark pkg as in "all". This provides the invariant that a // package that is *only* imported by other packages in "all" is always // marked as such before loading its imports. @@ -1877,7 +1876,7 @@ func (ld *loader) load(ctx context.Context, pkg *loadPkg) { // about (by reducing churn on the flag bits of dependencies), and costs // essentially nothing (these atomic flag ops are essentially free compared // to scanning source code for imports). - ld.applyPkgFlags(ctx, pkg, pkgInAll) + ld.applyPkgFlags(loaderstate, ctx, pkg, pkgInAll) } if ld.AllowPackage != nil { if err := ld.AllowPackage(ctx, pkg.path, pkg.mod); err != nil { @@ -1909,13 +1908,13 @@ func (ld *loader) load(ctx context.Context, pkg *loadPkg) { if pkg.inStd { // Imports from packages in "std" and "cmd" should resolve using // GOROOT/src/vendor even when "std" is not the main module. 
- path = ld.stdVendor(pkg.path, path) + path = ld.stdVendor(loaderstate, pkg.path, path) } - pkg.imports = append(pkg.imports, ld.pkg(ctx, path, importFlags)) + pkg.imports = append(pkg.imports, ld.pkg(loaderstate, ctx, path, importFlags)) } pkg.testImports = testImports - ld.applyPkgFlags(ctx, pkg, pkgImportsLoaded) + ld.applyPkgFlags(loaderstate, ctx, pkg, pkgImportsLoaded) } // pkgTest locates the test of pkg, creating it if needed, and updates its state @@ -1923,7 +1922,7 @@ func (ld *loader) load(ctx context.Context, pkg *loadPkg) { // // pkgTest requires that the imports of pkg have already been loaded (flagged // with pkgImportsLoaded). -func (ld *loader) pkgTest(ctx context.Context, pkg *loadPkg, testFlags loadPkgFlags) *loadPkg { +func (ld *loader) pkgTest(loaderstate *State, ctx context.Context, pkg *loadPkg, testFlags loadPkgFlags) *loadPkg { if pkg.isTest() { panic("pkgTest called on a test package") } @@ -1938,7 +1937,7 @@ func (ld *loader) pkgTest(ctx context.Context, pkg *loadPkg, testFlags loadPkgFl err: pkg.err, inStd: pkg.inStd, } - ld.applyPkgFlags(ctx, pkg.test, testFlags) + ld.applyPkgFlags(loaderstate, ctx, pkg.test, testFlags) createdTest = true }) @@ -1951,14 +1950,14 @@ func (ld *loader) pkgTest(ctx context.Context, pkg *loadPkg, testFlags loadPkgFl } for _, path := range pkg.testImports { if pkg.inStd { - path = ld.stdVendor(test.path, path) + path = ld.stdVendor(loaderstate, test.path, path) } - test.imports = append(test.imports, ld.pkg(ctx, path, importFlags)) + test.imports = append(test.imports, ld.pkg(loaderstate, ctx, path, importFlags)) } pkg.testImports = nil - ld.applyPkgFlags(ctx, test, pkgImportsLoaded) + ld.applyPkgFlags(loaderstate, ctx, test, pkgImportsLoaded) } else { - ld.applyPkgFlags(ctx, test, testFlags) + ld.applyPkgFlags(loaderstate, ctx, test, testFlags) } return test @@ -1966,7 +1965,7 @@ func (ld *loader) pkgTest(ctx context.Context, pkg *loadPkg, testFlags loadPkgFl // stdVendor returns the canonical import path for the package with the given // path when imported from the standard-library package at parentPath. -func (ld *loader) stdVendor(parentPath, path string) string { +func (ld *loader) stdVendor(loaderstate *State, parentPath, path string) string { if p, _, ok := fips140.ResolveImport(path); ok { return p } @@ -1975,14 +1974,14 @@ func (ld *loader) stdVendor(parentPath, path string) string { } if str.HasPathPrefix(parentPath, "cmd") { - if !ld.VendorModulesInGOROOTSrc || !MainModules.Contains("cmd") { + if !ld.VendorModulesInGOROOTSrc || !loaderstate.MainModules.Contains("cmd") { vendorPath := pathpkg.Join("cmd", "vendor", path) if _, err := os.Stat(filepath.Join(cfg.GOROOTsrc, filepath.FromSlash(vendorPath))); err == nil { return vendorPath } } - } else if !ld.VendorModulesInGOROOTSrc || !MainModules.Contains("std") || str.HasPathPrefix(parentPath, "vendor") { + } else if !ld.VendorModulesInGOROOTSrc || !loaderstate.MainModules.Contains("std") || str.HasPathPrefix(parentPath, "vendor") { // If we are outside of the 'std' module, resolve imports from within 'std' // to the vendor directory. // @@ -2028,7 +2027,7 @@ func (ld *loader) computePatternAll() (all []string) { // or as a replacement for another module, but not both at the same time. // // (See https://golang.org/issue/26607 and https://golang.org/issue/34650.) 
-func (ld *loader) checkMultiplePaths() { +func (ld *loader) checkMultiplePaths(loaderstate *State) { mods := ld.requirements.rootModules if cached := ld.requirements.graph.Load(); cached != nil { if mg := cached.mg; mg != nil { @@ -2038,7 +2037,7 @@ func (ld *loader) checkMultiplePaths() { firstPath := map[module.Version]string{} for _, mod := range mods { - src := resolveReplacement(mod) + src := resolveReplacement(loaderstate, mod) if prev, ok := firstPath[src]; !ok { firstPath[src] = mod.Path } else if prev != mod.Path { @@ -2049,8 +2048,8 @@ func (ld *loader) checkMultiplePaths() { // checkTidyCompatibility emits an error if any package would be loaded from a // different module under rs than under ld.requirements. -func (ld *loader) checkTidyCompatibility(ctx context.Context, rs *Requirements, compatVersion string) { - goVersion := rs.GoVersion() +func (ld *loader) checkTidyCompatibility(loaderstate *State, ctx context.Context, rs *Requirements, compatVersion string) { + goVersion := rs.GoVersion(loaderstate) suggestUpgrade := false suggestEFlag := false suggestFixes := func() { @@ -2067,7 +2066,7 @@ func (ld *loader) checkTidyCompatibility(ctx context.Context, rs *Requirements, fmt.Fprintln(os.Stderr) goFlag := "" - if goVersion != MainModules.GoVersion() { + if goVersion != loaderstate.MainModules.GoVersion(loaderstate) { goFlag = " -go=" + goVersion } @@ -2096,7 +2095,7 @@ func (ld *loader) checkTidyCompatibility(ctx context.Context, rs *Requirements, fmt.Fprintf(os.Stderr, "For information about 'go mod tidy' compatibility, see:\n\thttps://go.dev/ref/mod#graph-pruning\n") } - mg, err := rs.Graph(ctx) + mg, err := rs.Graph(loaderstate, ctx) if err != nil { ld.error(fmt.Errorf("error loading go %s module graph: %w", compatVersion, err)) ld.switchIfErrors(ctx) @@ -2134,7 +2133,7 @@ func (ld *loader) checkTidyCompatibility(ctx context.Context, rs *Requirements, pkg := pkg ld.work.Add(func() { - mod, _, _, _, err := importFromModules(ctx, pkg.path, rs, mg, ld.skipImportModFiles) + mod, _, _, _, err := importFromModules(loaderstate, ctx, pkg.path, rs, mg, ld.skipImportModFiles) if mod != pkg.mod { mismatches := <-mismatchMu mismatches[pkg] = mismatch{mod: mod, err: err} @@ -2196,14 +2195,14 @@ func (ld *loader) checkTidyCompatibility(ctx context.Context, rs *Requirements, // module that previously provided the package to a version that no // longer does, or to a version for which the module source code (but // not the go.mod file in isolation) has a checksum error. - if missing := (*ImportMissingError)(nil); errors.As(mismatch.err, &missing) { + if _, ok := errors.AsType[*ImportMissingError](mismatch.err); ok { selected := module.Version{ Path: pkg.mod.Path, Version: mg.Selected(pkg.mod.Path), } ld.error(fmt.Errorf("%s loaded from %v,\n\tbut go %s would fail to locate it in %s", pkg.stackText(), pkg.mod, compatVersion, selected)) } else { - if ambiguous := (*AmbiguousImportError)(nil); errors.As(mismatch.err, &ambiguous) { + if _, ok := errors.AsType[*AmbiguousImportError](mismatch.err); ok { // TODO: Is this check needed? 
 			}
 			ld.error(fmt.Errorf("%s loaded from %v,\n\tbut go %s would fail to locate it:\n\t%v", pkg.stackText(), pkg.mod, compatVersion, mismatch.err))
diff --git a/src/cmd/go/internal/modload/modfile.go b/src/cmd/go/internal/modload/modfile.go
index 04e204cc984..7191833a0dc 100644
--- a/src/cmd/go/internal/modload/modfile.go
+++ b/src/cmd/go/internal/modload/modfile.go
@@ -76,8 +76,7 @@ func ReadModFile(gomod string, fix modfile.VersionFixer) (data []byte, f *modfil
 }
 func shortPathErrorList(err error) error {
-	var el modfile.ErrorList
-	if errors.As(err, &el) {
+	if el, ok := errors.AsType[modfile.ErrorList](err); ok {
 		for i := range el {
 			el[i].Filename = base.ShortPath(el[i].Filename)
 		}
@@ -139,11 +138,11 @@ func pruningForGoVersion(goVersion string) modPruning {
 // CheckAllowed returns an error equivalent to ErrDisallowed if m is excluded by
 // the main module's go.mod or retracted by its author. Most version queries use
 // this to filter out versions that should not be used.
-func CheckAllowed(ctx context.Context, m module.Version) error {
-	if err := CheckExclusions(ctx, m); err != nil {
+func (s *State) CheckAllowed(ctx context.Context, m module.Version) error {
+	if err := s.CheckExclusions(ctx, m); err != nil {
 		return err
 	}
-	if err := CheckRetractions(ctx, m); err != nil {
+	if err := s.CheckRetractions(ctx, m); err != nil {
 		return err
 	}
 	return nil
@@ -155,9 +154,9 @@ var ErrDisallowed = errors.New("disallowed module version")
 // CheckExclusions returns an error equivalent to ErrDisallowed if module m is
 // excluded by the main module's go.mod file.
-func CheckExclusions(ctx context.Context, m module.Version) error {
-	for _, mainModule := range MainModules.Versions() {
-		if index := MainModules.Index(mainModule); index != nil && index.exclude[m] {
+func (s *State) CheckExclusions(ctx context.Context, m module.Version) error {
+	for _, mainModule := range s.MainModules.Versions() {
+		if index := s.MainModules.Index(mainModule); index != nil && index.exclude[m] {
 			return module.VersionError(m, errExcluded)
 		}
 	}
@@ -173,14 +172,17 @@ func (e *excludedError) Is(err error) bool { return err == ErrDisallowed }
 // CheckRetractions returns an error if module m has been retracted by
 // its author.
-func CheckRetractions(ctx context.Context, m module.Version) (err error) {
+func (s *State) CheckRetractions(ctx context.Context, m module.Version) (err error) {
 	defer func() {
-		if retractErr := (*ModuleRetractedError)(nil); err == nil || errors.As(err, &retractErr) {
+		if err == nil {
+			return
+		}
+		if _, ok := errors.AsType[*ModuleRetractedError](err); ok {
 			return
 		}
 		// Attribute the error to the version being checked, not the version from
 		// which the retractions were to be loaded.
-		if mErr := (*module.ModuleError)(nil); errors.As(err, &mErr) {
+		if mErr, ok := errors.AsType[*module.ModuleError](err); ok {
 			err = mErr.Err
 		}
 		err = &retractionLoadingError{m: m, err: err}
@@ -191,7 +193,7 @@ func CheckRetractions(ctx context.Context, m module.Version) (err error) {
 		// Cannot be retracted.
 		return nil
 	}
-	if repl := Replacement(module.Version{Path: m.Path}); repl.Path != "" {
+	if repl := Replacement(s, module.Version{Path: m.Path}); repl.Path != "" {
 		// All versions of the module were replaced.
 		// Don't load retractions, since we'd just load the replacement.
 		return nil
@@ -208,11 +210,11 @@ func CheckRetractions(ctx context.Context, m module.Version) (err error) {
 	// We load the raw file here: the go.mod file may have a different module
 	// path than we expect if the module or its repository was renamed.
// We still want to apply retractions to other aliases of the module. - rm, err := queryLatestVersionIgnoringRetractions(ctx, m.Path) + rm, err := queryLatestVersionIgnoringRetractions(s, ctx, m.Path) if err != nil { return err } - summary, err := rawGoModSummary(rm) + summary, err := rawGoModSummary(s, rm) if err != nil && !errors.Is(err, gover.ErrTooNew) { return err } @@ -298,7 +300,7 @@ func ShortMessage(message, emptyDefault string) string { // // CheckDeprecation returns an error if the message can't be loaded. // CheckDeprecation returns "", nil if there is no deprecation message. -func CheckDeprecation(ctx context.Context, m module.Version) (deprecation string, err error) { +func CheckDeprecation(loaderstate *State, ctx context.Context, m module.Version) (deprecation string, err error) { defer func() { if err != nil { err = fmt.Errorf("loading deprecation for %s: %w", m.Path, err) @@ -310,17 +312,17 @@ func CheckDeprecation(ctx context.Context, m module.Version) (deprecation string // Don't look up deprecation. return "", nil } - if repl := Replacement(module.Version{Path: m.Path}); repl.Path != "" { + if repl := Replacement(loaderstate, module.Version{Path: m.Path}); repl.Path != "" { // All versions of the module were replaced. // We'll look up deprecation separately for the replacement. return "", nil } - latest, err := queryLatestVersionIgnoringRetractions(ctx, m.Path) + latest, err := queryLatestVersionIgnoringRetractions(loaderstate, ctx, m.Path) if err != nil { return "", err } - summary, err := rawGoModSummary(latest) + summary, err := rawGoModSummary(loaderstate, latest) if err != nil && !errors.Is(err, gover.ErrTooNew) { return "", err } @@ -340,28 +342,28 @@ func replacement(mod module.Version, replace map[module.Version]module.Version) // Replacement returns the replacement for mod, if any. If the path in the // module.Version is relative it's relative to the single main module outside // workspace mode, or the workspace's directory in workspace mode. -func Replacement(mod module.Version) module.Version { - r, foundModRoot, _ := replacementFrom(mod) - return canonicalizeReplacePath(r, foundModRoot) +func Replacement(loaderstate *State, mod module.Version) module.Version { + r, foundModRoot, _ := replacementFrom(loaderstate, mod) + return canonicalizeReplacePath(loaderstate, r, foundModRoot) } // replacementFrom returns the replacement for mod, if any, the modroot of the replacement if it appeared in a go.mod, // and the source of the replacement. The replacement is relative to the go.work or go.mod file it appears in. -func replacementFrom(mod module.Version) (r module.Version, modroot string, fromFile string) { +func replacementFrom(loaderstate *State, mod module.Version) (r module.Version, modroot string, fromFile string) { foundFrom, found, foundModRoot := "", module.Version{}, "" - if MainModules == nil { + if loaderstate.MainModules == nil { return module.Version{}, "", "" - } else if MainModules.Contains(mod.Path) && mod.Version == "" { + } else if loaderstate.MainModules.Contains(mod.Path) && mod.Version == "" { // Don't replace the workspace version of the main module. 
return module.Version{}, "", "" } - if _, r, ok := replacement(mod, MainModules.WorkFileReplaceMap()); ok { - return r, "", workFilePath + if _, r, ok := replacement(mod, loaderstate.MainModules.WorkFileReplaceMap()); ok { + return r, "", loaderstate.workFilePath } - for _, v := range MainModules.Versions() { - if index := MainModules.Index(v); index != nil { + for _, v := range loaderstate.MainModules.Versions() { + if index := loaderstate.MainModules.Index(v); index != nil { if from, r, ok := replacement(mod, index.replace); ok { - modRoot := MainModules.ModRoot(v) + modRoot := loaderstate.MainModules.ModRoot(v) if foundModRoot != "" && foundFrom != from && found != r { base.Errorf("conflicting replacements found for %v in workspace modules defined by %v and %v", mod, modFilePath(foundModRoot), modFilePath(modRoot)) @@ -374,21 +376,21 @@ func replacementFrom(mod module.Version) (r module.Version, modroot string, from return found, foundModRoot, modFilePath(foundModRoot) } -func replaceRelativeTo() string { - if workFilePath := WorkFilePath(); workFilePath != "" { +func replaceRelativeTo(loaderstate *State) string { + if workFilePath := WorkFilePath(loaderstate); workFilePath != "" { return filepath.Dir(workFilePath) } - return MainModules.ModRoot(MainModules.mustGetSingleMainModule()) + return loaderstate.MainModules.ModRoot(loaderstate.MainModules.mustGetSingleMainModule(loaderstate)) } // canonicalizeReplacePath ensures that relative, on-disk, replaced module paths // are relative to the workspace directory (in workspace mode) or to the module's // directory (in module mode, as they already are). -func canonicalizeReplacePath(r module.Version, modRoot string) module.Version { +func canonicalizeReplacePath(loaderstate *State, r module.Version, modRoot string) module.Version { if filepath.IsAbs(r.Path) || r.Version != "" || modRoot == "" { return r } - workFilePath := WorkFilePath() + workFilePath := WorkFilePath(loaderstate) if workFilePath == "" { return r } @@ -405,8 +407,8 @@ func canonicalizeReplacePath(r module.Version, modRoot string) module.Version { // for m: either m itself, or the replacement for m (iff m is replaced). // It also returns the modroot of the module providing the replacement if // one was found. -func resolveReplacement(m module.Version) module.Version { - if r := Replacement(m); r.Path != "" { +func resolveReplacement(loaderstate *State, m module.Version) module.Version { + if r := Replacement(loaderstate, m); r.Path != "" { return r } return m @@ -571,12 +573,12 @@ type retraction struct { // module versions. // // The caller must not modify the returned summary. -func goModSummary(m module.Version) (*modFileSummary, error) { - if m.Version == "" && !inWorkspaceMode() && MainModules.Contains(m.Path) { +func goModSummary(loaderstate *State, m module.Version) (*modFileSummary, error) { + if m.Version == "" && !loaderstate.inWorkspaceMode() && loaderstate.MainModules.Contains(m.Path) { panic("internal error: goModSummary called on a main module") } if gover.IsToolchain(m.Path) { - return rawGoModSummary(m) + return rawGoModSummary(loaderstate, m) } if cfg.BuildMod == "vendor" { @@ -584,7 +586,7 @@ func goModSummary(m module.Version) (*modFileSummary, error) { module: module.Version{Path: m.Path}, } - readVendorList(VendorDir()) + readVendorList(VendorDir(loaderstate)) if vendorVersion[m.Path] != m.Version { // This module is not vendored, so packages cannot be loaded from it and // it cannot be relevant to the build. 
@@ -599,15 +601,15 @@ func goModSummary(m module.Version) (*modFileSummary, error) { return summary, nil } - actual := resolveReplacement(m) - if mustHaveSums() && actual.Version != "" { + actual := resolveReplacement(loaderstate, m) + if mustHaveSums(loaderstate) && actual.Version != "" { key := module.Version{Path: actual.Path, Version: actual.Version + "/go.mod"} if !modfetch.HaveSum(key) { suggestion := fmt.Sprintf(" for go.mod file; to add it:\n\tgo mod download %s", m.Path) return nil, module.VersionError(actual, &sumMissingError{suggestion: suggestion}) } } - summary, err := rawGoModSummary(actual) + summary, err := rawGoModSummary(loaderstate, actual) if err != nil { return nil, err } @@ -639,8 +641,8 @@ func goModSummary(m module.Version) (*modFileSummary, error) { } } - for _, mainModule := range MainModules.Versions() { - if index := MainModules.Index(mainModule); index != nil && len(index.exclude) > 0 { + for _, mainModule := range loaderstate.MainModules.Versions() { + if index := loaderstate.MainModules.Index(mainModule); index != nil && len(index.exclude) > 0 { // Drop any requirements on excluded versions. // Don't modify the cached summary though, since we might need the raw // summary separately. @@ -674,7 +676,7 @@ func goModSummary(m module.Version) (*modFileSummary, error) { // rawGoModSummary cannot be used on the main module outside of workspace mode. // The modFileSummary can still be used for retractions and deprecations // even if a TooNewError is returned. -func rawGoModSummary(m module.Version) (*modFileSummary, error) { +func rawGoModSummary(loaderstate *State, m module.Version) (*modFileSummary, error) { if gover.IsToolchain(m.Path) { if m.Path == "go" && gover.Compare(m.Version, gover.GoStrictVersion) >= 0 { // Declare that go 1.21.3 requires toolchain 1.21.3, @@ -684,7 +686,7 @@ func rawGoModSummary(m module.Version) (*modFileSummary, error) { } return &modFileSummary{module: m}, nil } - if m.Version == "" && !inWorkspaceMode() && MainModules.Contains(m.Path) { + if m.Version == "" && !loaderstate.inWorkspaceMode() && loaderstate.MainModules.Contains(m.Path) { // Calling rawGoModSummary implies that we are treating m as a module whose // requirements aren't the roots of the module graph and can't be modified. // @@ -692,22 +694,22 @@ func rawGoModSummary(m module.Version) (*modFileSummary, error) { // are the roots of the module graph and we expect them to be kept consistent. panic("internal error: rawGoModSummary called on a main module") } - if m.Version == "" && inWorkspaceMode() && m.Path == "command-line-arguments" { + if m.Version == "" && loaderstate.inWorkspaceMode() && m.Path == "command-line-arguments" { // "go work sync" calls LoadModGraph to make sure the module graph is valid. // If there are no modules in the workspace, we synthesize an empty // command-line-arguments module, which rawGoModData cannot read a go.mod for. return &modFileSummary{module: m}, nil - } else if m.Version == "" && inWorkspaceMode() && MainModules.Contains(m.Path) { + } else if m.Version == "" && loaderstate.inWorkspaceMode() && loaderstate.MainModules.Contains(m.Path) { // When go get uses EnterWorkspace to check that the workspace loads properly, // it will update the contents of the workspace module's modfile in memory. To use the updated // contents of the modfile when doing the load, don't read from disk and instead // recompute a summary using the updated contents of the modfile. 
- if mf := MainModules.ModFile(m); mf != nil { - return summaryFromModFile(m, MainModules.modFiles[m]) + if mf := loaderstate.MainModules.ModFile(m); mf != nil { + return summaryFromModFile(m, loaderstate.MainModules.modFiles[m]) } } return rawGoModSummaryCache.Do(m, func() (*modFileSummary, error) { - name, data, err := rawGoModData(m) + name, data, err := rawGoModData(loaderstate, m) if err != nil { return nil, err } @@ -779,15 +781,15 @@ var rawGoModSummaryCache par.ErrCache[module.Version, *modFileSummary] // // Unlike rawGoModSummary, rawGoModData does not cache its results in memory. // Use rawGoModSummary instead unless you specifically need these bytes. -func rawGoModData(m module.Version) (name string, data []byte, err error) { +func rawGoModData(loaderstate *State, m module.Version) (name string, data []byte, err error) { if m.Version == "" { dir := m.Path if !filepath.IsAbs(dir) { - if inWorkspaceMode() && MainModules.Contains(m.Path) { - dir = MainModules.ModRoot(m) + if loaderstate.inWorkspaceMode() && loaderstate.MainModules.Contains(m.Path) { + dir = loaderstate.MainModules.ModRoot(m) } else { // m is a replacement module with only a file path. - dir = filepath.Join(replaceRelativeTo(), dir) + dir = filepath.Join(replaceRelativeTo(loaderstate), dir) } } name = filepath.Join(dir, "go.mod") @@ -823,12 +825,12 @@ func rawGoModData(m module.Version) (name string, data []byte, err error) { // // If the queried latest version is replaced, // queryLatestVersionIgnoringRetractions returns the replacement. -func queryLatestVersionIgnoringRetractions(ctx context.Context, path string) (latest module.Version, err error) { +func queryLatestVersionIgnoringRetractions(loaderstate *State, ctx context.Context, path string) (latest module.Version, err error) { return latestVersionIgnoringRetractionsCache.Do(path, func() (module.Version, error) { ctx, span := trace.StartSpan(ctx, "queryLatestVersionIgnoringRetractions "+path) defer span.Done() - if repl := Replacement(module.Version{Path: path}); repl.Path != "" { + if repl := Replacement(loaderstate, module.Version{Path: path}); repl.Path != "" { // All versions of the module were replaced. // No need to query. return repl, nil @@ -838,12 +840,12 @@ func queryLatestVersionIgnoringRetractions(ctx context.Context, path string) (la // Ignore exclusions from the main module's go.mod. const ignoreSelected = "" var allowAll AllowedFunc - rev, err := Query(ctx, path, "latest", ignoreSelected, allowAll) + rev, err := Query(loaderstate, ctx, path, "latest", ignoreSelected, allowAll) if err != nil { return module.Version{}, err } latest := module.Version{Path: path, Version: rev.Version} - if repl := resolveReplacement(latest); repl.Path != "" { + if repl := resolveReplacement(loaderstate, latest); repl.Path != "" { latest = repl } return latest, nil diff --git a/src/cmd/go/internal/modload/mvs.go b/src/cmd/go/internal/modload/mvs.go index 8ae2dbff1e8..63fedae0f16 100644 --- a/src/cmd/go/internal/modload/mvs.go +++ b/src/cmd/go/internal/modload/mvs.go @@ -39,11 +39,12 @@ func cmpVersion(p string, v1, v2 string) int { // mvsReqs implements mvs.Reqs for module semantic versions, // with any exclusions or replacements applied internally. type mvsReqs struct { - roots []module.Version + loaderstate *State // TODO(jitsu): Is there a way we can not depend on the entire loader state? 
+ roots []module.Version } func (r *mvsReqs) Required(mod module.Version) ([]module.Version, error) { - if mod.Version == "" && MainModules.Contains(mod.Path) { + if mod.Version == "" && r.loaderstate.MainModules.Contains(mod.Path) { // Use the build list as it existed when r was constructed, not the current // global build list. return r.roots, nil @@ -53,7 +54,7 @@ func (r *mvsReqs) Required(mod module.Version) ([]module.Version, error) { return nil, nil } - summary, err := goModSummary(mod) + summary, err := goModSummary(r.loaderstate, mod) if err != nil { return nil, err } @@ -79,11 +80,11 @@ func (*mvsReqs) Upgrade(m module.Version) (module.Version, error) { return m, nil } -func versions(ctx context.Context, path string, allowed AllowedFunc) (versions []string, origin *codehost.Origin, err error) { +func versions(loaderstate *State, ctx context.Context, path string, allowed AllowedFunc) (versions []string, origin *codehost.Origin, err error) { // Note: modfetch.Lookup and repo.Versions are cached, // so there's no need for us to add extra caching here. err = modfetch.TryProxies(func(proxy string) error { - repo, err := lookupRepo(ctx, proxy, path) + repo, err := lookupRepo(loaderstate, ctx, proxy, path) if err != nil { return err } @@ -111,12 +112,12 @@ func versions(ctx context.Context, path string, allowed AllowedFunc) (versions [ // // Since the version of a main module is not found in the version list, // it has no previous version. -func previousVersion(ctx context.Context, m module.Version) (module.Version, error) { - if m.Version == "" && MainModules.Contains(m.Path) { +func previousVersion(loaderstate *State, ctx context.Context, m module.Version) (module.Version, error) { + if m.Version == "" && loaderstate.MainModules.Contains(m.Path) { return module.Version{Path: m.Path, Version: "none"}, nil } - list, _, err := versions(ctx, m.Path, CheckAllowed) + list, _, err := versions(loaderstate, ctx, m.Path, loaderstate.CheckAllowed) if err != nil { if errors.Is(err, os.ErrNotExist) { return module.Version{Path: m.Path, Version: "none"}, nil @@ -130,7 +131,7 @@ func previousVersion(ctx context.Context, m module.Version) (module.Version, err return module.Version{Path: m.Path, Version: "none"}, nil } -func (*mvsReqs) Previous(m module.Version) (module.Version, error) { +func (r *mvsReqs) Previous(m module.Version) (module.Version, error) { // TODO(golang.org/issue/38714): thread tracing context through MVS. - return previousVersion(context.TODO(), m) + return previousVersion(r.loaderstate, context.TODO(), m) } diff --git a/src/cmd/go/internal/modload/query.go b/src/cmd/go/internal/modload/query.go index c4cf55442ba..f710ce2c624 100644 --- a/src/cmd/go/internal/modload/query.go +++ b/src/cmd/go/internal/modload/query.go @@ -80,19 +80,19 @@ import ( // // Query often returns a non-nil *RevInfo with a non-nil error, // to provide an info.Origin that can allow the error to be cached. -func Query(ctx context.Context, path, query, current string, allowed AllowedFunc) (*modfetch.RevInfo, error) { +func Query(loaderstate *State, ctx context.Context, path, query, current string, allowed AllowedFunc) (*modfetch.RevInfo, error) { ctx, span := trace.StartSpan(ctx, "modload.Query "+path) defer span.Done() - return queryReuse(ctx, path, query, current, allowed, nil) + return queryReuse(loaderstate, ctx, path, query, current, allowed, nil) } // queryReuse is like Query but also takes a map of module info that can be reused // if the validation criteria in Origin are met. 
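Several hunks in this CL (shortPathErrorList and CheckRetractions above, and more below) replace the two-step errors.As idiom with the generic errors.AsType, which returns the matched value and a boolean. The sketch below assumes the signature implied by those call sites, errors.AsType[T](err) (T, bool), and defines an equivalent local helper so it also runs on toolchains without the new API:

    package main

    import (
        "errors"
        "fmt"
    )

    type NotFoundError struct{ Path string }

    func (e *NotFoundError) Error() string { return e.Path + " not found" }

    // asType mirrors the errors.AsType generic adopted in this CL:
    // it returns the first error in err's chain of type T, if any.
    func asType[T error](err error) (T, bool) {
        var target T
        ok := errors.As(err, &target)
        return target, ok
    }

    func main() {
        err := fmt.Errorf("loading module: %w", &NotFoundError{Path: "example.com/m"})

        // Old idiom: declare a typed variable, then pass its address.
        var nf *NotFoundError
        if errors.As(err, &nf) {
            fmt.Println("old:", nf.Path)
        }

        // New idiom, matching the converted call sites in this file.
        if nf, ok := asType[*NotFoundError](err); ok {
            fmt.Println("new:", nf.Path)
        }
    }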
-func queryReuse(ctx context.Context, path, query, current string, allowed AllowedFunc, reuse map[module.Version]*modinfo.ModulePublic) (*modfetch.RevInfo, error) {
+func queryReuse(loaderstate *State, ctx context.Context, path, query, current string, allowed AllowedFunc, reuse map[module.Version]*modinfo.ModulePublic) (*modfetch.RevInfo, error) {
 	var info *modfetch.RevInfo
 	err := modfetch.TryProxies(func(proxy string) (err error) {
-		info, err = queryProxy(ctx, proxy, path, query, current, allowed, reuse)
+		info, err = queryProxy(loaderstate, ctx, proxy, path, query, current, allowed, reuse)
 		return err
 	})
 	return info, err
@@ -100,9 +100,9 @@
 // checkReuse checks whether a revision of a given module
 // may be reused, according to the information in origin.
-func checkReuse(ctx context.Context, m module.Version, old *codehost.Origin) error {
+func checkReuse(loaderstate *State, ctx context.Context, m module.Version, old *codehost.Origin) error {
 	return modfetch.TryProxies(func(proxy string) error {
-		repo, err := lookupRepo(ctx, proxy, m.Path)
+		repo, err := lookupRepo(loaderstate, ctx, proxy, m.Path)
 		if err != nil {
 			return err
 		}
@@ -197,7 +197,7 @@ func (queryDisabledError) Error() string {
 	return fmt.Sprintf("cannot query module due to -mod=%s\n\t(%s)", cfg.BuildMod, cfg.BuildModReason)
 }
-func queryProxy(ctx context.Context, proxy, path, query, current string, allowed AllowedFunc, reuse map[module.Version]*modinfo.ModulePublic) (*modfetch.RevInfo, error) {
+func queryProxy(loaderstate *State, ctx context.Context, proxy, path, query, current string, allowed AllowedFunc, reuse map[module.Version]*modinfo.ModulePublic) (*modfetch.RevInfo, error) {
 	ctx, span := trace.StartSpan(ctx, "modload.queryProxy "+path+" "+query)
 	defer span.Done()
@@ -211,7 +211,7 @@
 		allowed = func(context.Context, module.Version) error { return nil }
 	}
-	if MainModules.Contains(path) && (query == "upgrade" || query == "patch") {
+	if loaderstate.MainModules.Contains(path) && (query == "upgrade" || query == "patch") {
 		m := module.Version{Path: path}
 		if err := allowed(ctx, m); err != nil {
 			return nil, fmt.Errorf("internal error: main module version is not allowed: %w", err)
@@ -223,7 +223,7 @@
 		return nil, fmt.Errorf("can't query specific version (%q) of standard-library module %q", query, path)
 	}
-	repo, err := lookupRepo(ctx, proxy, path)
+	repo, err := lookupRepo(loaderstate, ctx, proxy, path)
 	if err != nil {
 		return nil, err
 	}
@@ -296,7 +296,7 @@
 		return &clone
 	}
-	releases, prereleases, err := qm.filterVersions(ctx, versions.List)
+	releases, prereleases, err := qm.filterVersions(loaderstate, ctx, versions.List)
 	if err != nil {
 		return revWithOrigin(nil), err
 	}
@@ -569,7 +569,7 @@ func (qm *queryMatcher) allowsVersion(ctx context.Context, v string) bool {
 //
 // If the allowed predicate returns an error not equivalent to ErrDisallowed,
 // filterVersions returns that error.
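queryReuse, checkReuse, and queryProxy above all share one shape: modfetch.TryProxies runs a callback once per configured proxy and stops at the first conclusive answer. A self-contained sketch of that fallback loop; tryProxies and errNotFound here are stand-ins for the internal modfetch API, not its real definition:

    package main

    import (
        "errors"
        "fmt"
    )

    var errNotFound = errors.New("module not found")

    // tryProxies mimics the contract the callers above rely on: try
    // each proxy in order, stop on success or on a hard failure, and
    // fall through to the next proxy only on a not-found error.
    func tryProxies(proxies []string, f func(proxy string) error) error {
        var lastErr error
        for _, p := range proxies {
            err := f(p)
            if err == nil {
                return nil
            }
            lastErr = err
            if !errors.Is(err, errNotFound) {
                return err // a hard failure is not masked by later proxies
            }
        }
        return lastErr
    }

    func main() {
        var version string
        err := tryProxies([]string{"proxy-a", "proxy-b"}, func(proxy string) error {
            if proxy == "proxy-a" {
                return errNotFound // miss: fall through to proxy-b
            }
            version = "v1.2.3 via " + proxy
            return nil
        })
        fmt.Println(version, err)
    }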
-func (qm *queryMatcher) filterVersions(ctx context.Context, versions []string) (releases, prereleases []string, err error) { +func (qm *queryMatcher) filterVersions(loaderstate *State, ctx context.Context, versions []string) (releases, prereleases []string, err error) { needIncompatible := qm.preferIncompatible var lastCompatible string @@ -602,7 +602,7 @@ func (qm *queryMatcher) filterVersions(ctx context.Context, versions []string) ( // ignore any version with a higher (+incompatible) major version. (See // https://golang.org/issue/34165.) Note that we even prefer a // compatible pre-release over an incompatible release. - ok, err := versionHasGoMod(ctx, module.Version{Path: qm.path, Version: lastCompatible}) + ok, err := versionHasGoMod(loaderstate, ctx, module.Version{Path: qm.path, Version: lastCompatible}) if err != nil { return nil, nil, err } @@ -639,11 +639,11 @@ type QueryResult struct { // QueryPackages is like QueryPattern, but requires that the pattern match at // least one package and omits the non-package result (if any). -func QueryPackages(ctx context.Context, pattern, query string, current func(string) string, allowed AllowedFunc) ([]QueryResult, error) { - pkgMods, modOnly, err := QueryPattern(ctx, pattern, query, current, allowed) +func QueryPackages(loaderstate *State, ctx context.Context, pattern, query string, current func(string) string, allowed AllowedFunc) ([]QueryResult, error) { + pkgMods, modOnly, err := QueryPattern(loaderstate, ctx, pattern, query, current, allowed) if len(pkgMods) == 0 && err == nil { - replacement := Replacement(modOnly.Mod) + replacement := Replacement(loaderstate, modOnly.Mod) return nil, &PackageNotInModuleError{ Mod: modOnly.Mod, Replacement: replacement, @@ -670,7 +670,7 @@ func QueryPackages(ctx context.Context, pattern, query string, current func(stri // // QueryPattern always returns at least one QueryResult (which may be only // modOnly) or a non-nil error. 
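The filterVersions change above keeps its split of candidate versions into releases and prereleases, so that queryProxy can prefer a matching release and, per the comment in that hunk, prefer a compatible prerelease over an +incompatible release. A sketch of just the partitioning step, using golang.org/x/mod/semver and eliding the allowed predicate and the versionHasGoMod check:

    package main

    import (
        "fmt"

        "golang.org/x/mod/semver"
    )

    // splitVersions partitions canonical semver strings the way
    // filterVersions does, so a release can be preferred when present.
    func splitVersions(versions []string) (releases, prereleases []string) {
        for _, v := range versions {
            if !semver.IsValid(v) {
                continue
            }
            if semver.Prerelease(v) != "" {
                prereleases = append(prereleases, v)
            } else {
                releases = append(releases, v)
            }
        }
        return releases, prereleases
    }

    func main() {
        rel, pre := splitVersions([]string{"v1.0.0", "v1.1.0-beta.1", "v1.1.0"})
        fmt.Println(rel) // [v1.0.0 v1.1.0]
        fmt.Println(pre) // [v1.1.0-beta.1]
    }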
-func QueryPattern(ctx context.Context, pattern, query string, current func(string) string, allowed AllowedFunc) (pkgMods []QueryResult, modOnly *QueryResult, err error) { +func QueryPattern(loaderstate *State, ctx context.Context, pattern, query string, current func(string) string, allowed AllowedFunc) (pkgMods []QueryResult, modOnly *QueryResult, err error) { ctx, span := trace.StartSpan(ctx, "modload.QueryPattern "+pattern+" "+query) defer span.Done() @@ -693,15 +693,15 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin } match = func(mod module.Version, roots []string, isLocal bool) *search.Match { m := search.NewMatch(pattern) - matchPackages(ctx, m, imports.AnyTags(), omitStd, []module.Version{mod}) + matchPackages(loaderstate, ctx, m, imports.AnyTags(), omitStd, []module.Version{mod}) return m } } else { match = func(mod module.Version, roots []string, isLocal bool) *search.Match { m := search.NewMatch(pattern) prefix := mod.Path - if MainModules.Contains(mod.Path) { - prefix = MainModules.PathPrefix(module.Version{Path: mod.Path}) + if loaderstate.MainModules.Contains(mod.Path) { + prefix = loaderstate.MainModules.PathPrefix(module.Version{Path: mod.Path}) } for _, root := range roots { if _, ok, err := dirInModule(pattern, prefix, root, isLocal); err != nil { @@ -715,8 +715,8 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin } var mainModuleMatches []module.Version - for _, mainModule := range MainModules.Versions() { - m := match(mainModule, modRoots, true) + for _, mainModule := range loaderstate.MainModules.Versions() { + m := match(mainModule, loaderstate.modRoots, true) if len(m.Pkgs) > 0 { if query != "upgrade" && query != "patch" { return nil, nil, &QueryMatchesPackagesInMainModuleError{ @@ -756,16 +756,17 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin var ( results []QueryResult - candidateModules = modulePrefixesExcludingTarget(base) + candidateModules = modulePrefixesExcludingTarget(loaderstate, base) ) if len(candidateModules) == 0 { if modOnly != nil { return nil, modOnly, nil } else if len(mainModuleMatches) != 0 { return nil, nil, &QueryMatchesMainModulesError{ - MainModules: mainModuleMatches, - Pattern: pattern, - Query: query, + MainModules: mainModuleMatches, + Pattern: pattern, + Query: query, + PatternIsModule: loaderstate.MainModules.Contains(pattern), } } else { return nil, nil, &PackageNotInModuleError{ @@ -783,7 +784,7 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin pathCurrent := current(path) r.Mod.Path = path - r.Rev, err = queryProxy(ctx, proxy, path, query, pathCurrent, allowed, nil) + r.Rev, err = queryProxy(loaderstate, ctx, proxy, path, query, pathCurrent, allowed, nil) if err != nil { return r, err } @@ -791,7 +792,7 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin if gover.IsToolchain(r.Mod.Path) { return r, nil } - root, isLocal, err := fetch(ctx, r.Mod) + root, isLocal, err := fetch(loaderstate, ctx, r.Mod) if err != nil { return r, err } @@ -801,7 +802,7 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin if err := firstError(m); err != nil { return r, err } - replacement := Replacement(r.Mod) + replacement := Replacement(loaderstate, r.Mod) return r, &PackageNotInModuleError{ Mod: r.Mod, Replacement: replacement, @@ -812,7 +813,7 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin return r, nil } - 
allResults, err := queryPrefixModules(ctx, candidateModules, queryModule) + allResults, err := queryPrefixModules(loaderstate, ctx, candidateModules, queryModule) results = allResults[:0] for _, r := range allResults { if len(r.Packages) == 0 { @@ -826,8 +827,9 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin if len(mainModuleMatches) > 0 && len(results) == 0 && modOnly == nil && errors.Is(err, fs.ErrNotExist) { return nil, nil, &QueryMatchesMainModulesError{ - Pattern: pattern, - Query: query, + Pattern: pattern, + Query: query, + PatternIsModule: loaderstate.MainModules.Contains(pattern), } } return slices.Clip(results), modOnly, err @@ -838,11 +840,11 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin // itself, sorted by descending length. Prefixes that are not valid module paths // but are valid package paths (like "m" or "example.com/.gen") are included, // since they might be replaced. -func modulePrefixesExcludingTarget(path string) []string { +func modulePrefixesExcludingTarget(loaderstate *State, path string) []string { prefixes := make([]string, 0, strings.Count(path, "/")+1) mainModulePrefixes := make(map[string]bool) - for _, m := range MainModules.Versions() { + for _, m := range loaderstate.MainModules.Versions() { mainModulePrefixes[m.Path] = true } @@ -863,7 +865,7 @@ func modulePrefixesExcludingTarget(path string) []string { return prefixes } -func queryPrefixModules(ctx context.Context, candidateModules []string, queryModule func(ctx context.Context, path string) (QueryResult, error)) (found []QueryResult, err error) { +func queryPrefixModules(loaderstate *State, ctx context.Context, candidateModules []string, queryModule func(ctx context.Context, path string) (QueryResult, error)) (found []QueryResult, err error) { ctx, span := trace.StartSpan(ctx, "modload.queryPrefixModules") defer span.Done() @@ -905,7 +907,7 @@ func queryPrefixModules(ctx context.Context, candidateModules []string, queryMod case *PackageNotInModuleError: // Given the option, prefer to attribute “package not in module” // to modules other than the main one. - if noPackage == nil || MainModules.Contains(noPackage.Mod.Path) { + if noPackage == nil || loaderstate.MainModules.Contains(noPackage.Mod.Path) { noPackage = rErr } case *NoMatchingVersionError: @@ -932,7 +934,7 @@ func queryPrefixModules(ctx context.Context, candidateModules []string, queryMod if notExistErr == nil { notExistErr = rErr } - } else if iv := (*module.InvalidVersionError)(nil); errors.As(rErr, &iv) { + } else if _, ok := errors.AsType[*module.InvalidVersionError](rErr); ok { if invalidVersion == nil { invalidVersion = rErr } @@ -1096,8 +1098,8 @@ func (e *PackageNotInModuleError) ImportPath() string { // go.mod with different content. Second, if we don't fetch the .zip, then // we don't need to verify it in go.sum. This makes 'go list -m -u' faster // and simpler. 
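modulePrefixesExcludingTarget, updated above, enumerates the candidate module paths that might provide a package: the path itself and every slash-separated prefix, longest first, minus the main modules. A standalone sketch of that enumeration (modulePrefixes is a simplified stand-in that skips module-path validation):

    package main

    import (
        "fmt"
        "strings"
    )

    // modulePrefixes returns path and each parent prefix in descending
    // length, omitting any path in exclude (the main modules).
    func modulePrefixes(path string, exclude map[string]bool) []string {
        prefixes := make([]string, 0, strings.Count(path, "/")+1)
        for p := path; ; {
            if !exclude[p] {
                prefixes = append(prefixes, p)
            }
            i := strings.LastIndex(p, "/")
            if i < 0 {
                break
            }
            p = p[:i]
        }
        return prefixes
    }

    func main() {
        exclude := map[string]bool{"example.com/m": true} // a main module
        fmt.Println(modulePrefixes("example.com/m/sub/pkg", exclude))
        // [example.com/m/sub/pkg example.com/m/sub example.com]
    }

Each candidate is then queried concurrently by queryPrefixModules, with errors ranked so that a "package not in module" result is attributed to modules other than the main one when possible.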
-func versionHasGoMod(_ context.Context, m module.Version) (bool, error) { - _, data, err := rawGoModData(m) +func versionHasGoMod(loaderstate *State, _ context.Context, m module.Version) (bool, error) { + _, data, err := rawGoModData(loaderstate, m) if err != nil { return false, err } @@ -1112,12 +1114,12 @@ type versionRepo interface { CheckReuse(context.Context, *codehost.Origin) error Versions(ctx context.Context, prefix string) (*modfetch.Versions, error) Stat(ctx context.Context, rev string) (*modfetch.RevInfo, error) - Latest(context.Context) (*modfetch.RevInfo, error) + Latest(ctx context.Context) (*modfetch.RevInfo, error) } var _ versionRepo = modfetch.Repo(nil) -func lookupRepo(ctx context.Context, proxy, path string) (repo versionRepo, err error) { +func lookupRepo(loaderstate *State, ctx context.Context, proxy, path string) (repo versionRepo, err error) { if path != "go" && path != "toolchain" { err = module.CheckPath(path) } @@ -1127,10 +1129,10 @@ func lookupRepo(ctx context.Context, proxy, path string) (repo versionRepo, err repo = emptyRepo{path: path, err: err} } - if MainModules == nil { + if loaderstate.MainModules == nil { return repo, err - } else if _, ok := MainModules.HighestReplaced()[path]; ok { - return &replacementRepo{repo: repo}, nil + } else if _, ok := loaderstate.MainModules.HighestReplaced()[path]; ok { + return &replacementRepo{repo: repo, loaderstate: loaderstate}, nil } return repo, err @@ -1163,7 +1165,8 @@ func (er emptyRepo) Latest(ctx context.Context) (*modfetch.RevInfo, error) { ret // modules, so a replacementRepo should only be constructed for a module that // actually has one or more valid replacements. type replacementRepo struct { - repo versionRepo + repo versionRepo + loaderstate *State } var _ versionRepo = (*replacementRepo)(nil) @@ -1186,8 +1189,8 @@ func (rr *replacementRepo) Versions(ctx context.Context, prefix string) (*modfet } versions := repoVersions.List - for _, mm := range MainModules.Versions() { - if index := MainModules.Index(mm); index != nil && len(index.replace) > 0 { + for _, mm := range rr.loaderstate.MainModules.Versions() { + if index := rr.loaderstate.MainModules.Index(mm); index != nil && len(index.replace) > 0 { path := rr.ModulePath() for m := range index.replace { if m.Path == path && strings.HasPrefix(m.Version, prefix) && m.Version != "" && !module.IsPseudoVersion(m.Version) { @@ -1215,8 +1218,8 @@ func (rr *replacementRepo) Stat(ctx context.Context, rev string) (*modfetch.RevI return info, err } var hasReplacements bool - for _, v := range MainModules.Versions() { - if index := MainModules.Index(v); index != nil && len(index.replace) > 0 { + for _, v := range rr.loaderstate.MainModules.Versions() { + if index := rr.loaderstate.MainModules.Index(v); index != nil && len(index.replace) > 0 { hasReplacements = true } } @@ -1239,7 +1242,7 @@ func (rr *replacementRepo) Stat(ctx context.Context, rev string) (*modfetch.RevI } } - if r := Replacement(module.Version{Path: path, Version: v}); r.Path == "" { + if r := Replacement(rr.loaderstate, module.Version{Path: path, Version: v}); r.Path == "" { return info, err } return rr.replacementStat(v) @@ -1249,7 +1252,7 @@ func (rr *replacementRepo) Latest(ctx context.Context) (*modfetch.RevInfo, error info, err := rr.repo.Latest(ctx) path := rr.ModulePath() - if v, ok := MainModules.HighestReplaced()[path]; ok { + if v, ok := rr.loaderstate.MainModules.HighestReplaced()[path]; ok { if v == "" { // The only replacement is a wildcard that doesn't specify a version, so // 
synthesize a pseudo-version with an appropriate major version and a @@ -1284,13 +1287,14 @@ func (rr *replacementRepo) replacementStat(v string) (*modfetch.RevInfo, error) // a version of the main module that cannot be satisfied. // (The main module's version cannot be changed.) type QueryMatchesMainModulesError struct { - MainModules []module.Version - Pattern string - Query string + MainModules []module.Version + Pattern string + Query string + PatternIsModule bool // true if pattern is one of the main modules } func (e *QueryMatchesMainModulesError) Error() string { - if MainModules.Contains(e.Pattern) { + if e.PatternIsModule { return fmt.Sprintf("can't request version %q of the main module (%s)", e.Query, e.Pattern) } diff --git a/src/cmd/go/internal/modload/query_test.go b/src/cmd/go/internal/modload/query_test.go index 93f8f0d00d1..a465fab5db3 100644 --- a/src/cmd/go/internal/modload/query_test.go +++ b/src/cmd/go/internal/modload/query_test.go @@ -168,6 +168,7 @@ func TestQuery(t *testing.T) { ctx := context.Background() for _, tt := range queryTests { + loaderstate := NewState() allow := tt.allow if allow == "" { allow = "*" @@ -182,7 +183,7 @@ func TestQuery(t *testing.T) { t.Run(strings.ReplaceAll(tt.path, "/", "_")+"/"+tt.query+"/"+tt.current+"/"+allow, func(t *testing.T) { t.Parallel() - info, err := Query(ctx, tt.path, tt.query, tt.current, allowed) + info, err := Query(loaderstate, ctx, tt.path, tt.query, tt.current, allowed) if tt.err != "" { if err == nil { t.Errorf("Query(_, %q, %q, %q, %v) = %v, want error %q", tt.path, tt.query, tt.current, allow, info.Version, tt.err) diff --git a/src/cmd/go/internal/modload/search.go b/src/cmd/go/internal/modload/search.go index 9ff9738e281..c45808635db 100644 --- a/src/cmd/go/internal/modload/search.go +++ b/src/cmd/go/internal/modload/search.go @@ -41,7 +41,7 @@ const ( // matchPackages is like m.MatchPackages, but uses a local variable (rather than // a global) for tags, can include or exclude packages in the standard library, // and is restricted to the given list of modules. 
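The replacementRepo.Latest hunk above handles a replace directive whose left-hand side names only a module path: there is no real version to report, so the code synthesizes a placeholder pseudo-version whose timestamp sorts below any real one. A sketch of that synthesis with golang.org/x/mod/module's helper; the "v0" major version here is the fallback case, assumed for a path with no major-version suffix:

    package main

    import (
        "fmt"
        "time"

        "golang.org/x/mod/module"
    )

    func main() {
        // A wildcard replacement has no version of its own, so invent
        // the lowest plausible one: zero timestamp, zero revision.
        v := module.PseudoVersion("v0", "", time.Time{}, "000000000000")
        fmt.Println(v) // v0.0.0-00010101000000-000000000000
    }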
-func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, filter stdFilter, modules []module.Version) { +func matchPackages(loaderstate *State, ctx context.Context, m *search.Match, tags map[string]bool, filter stdFilter, modules []module.Version) { ctx, span := trace.StartSpan(ctx, "modload.matchPackages") defer span.Done() @@ -74,7 +74,7 @@ func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, f ) q := par.NewQueue(runtime.GOMAXPROCS(0)) - ignorePatternsMap := parseIgnorePatterns(ctx, treeCanMatch, modules) + ignorePatternsMap := parseIgnorePatterns(loaderstate, ctx, treeCanMatch, modules) walkPkgs := func(root, importPathRoot string, prune pruning) { _, span := trace.StartSpan(ctx, "walkPkgs "+root) defer span.Done() @@ -171,13 +171,13 @@ func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, f } if cfg.BuildMod == "vendor" { - for _, mod := range MainModules.Versions() { - if modRoot := MainModules.ModRoot(mod); modRoot != "" { - walkPkgs(modRoot, MainModules.PathPrefix(mod), pruneGoMod|pruneVendor) + for _, mod := range loaderstate.MainModules.Versions() { + if modRoot := loaderstate.MainModules.ModRoot(mod); modRoot != "" { + walkPkgs(modRoot, loaderstate.MainModules.PathPrefix(mod), pruneGoMod|pruneVendor) } } - if HasModRoot() { - walkPkgs(VendorDir(), "", pruneVendor) + if loaderstate.HasModRoot() { + walkPkgs(VendorDir(loaderstate), "", pruneVendor) } return } @@ -191,16 +191,16 @@ func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, f root, modPrefix string isLocal bool ) - if MainModules.Contains(mod.Path) { - if MainModules.ModRoot(mod) == "" { + if loaderstate.MainModules.Contains(mod.Path) { + if loaderstate.MainModules.ModRoot(mod) == "" { continue // If there is no main module, we can't search in it. } - root = MainModules.ModRoot(mod) - modPrefix = MainModules.PathPrefix(mod) + root = loaderstate.MainModules.ModRoot(mod) + modPrefix = loaderstate.MainModules.PathPrefix(mod) isLocal = true } else { var err error - root, isLocal, err = fetch(ctx, mod) + root, isLocal, err = fetch(loaderstate, ctx, mod) if err != nil { m.AddError(err) continue @@ -283,20 +283,20 @@ func walkFromIndex(index *modindex.Module, importPathRoot string, isMatch, treeC // // If m is the zero module.Version, MatchInModule matches the pattern // against the standard library (std and cmd) in GOROOT/src. -func MatchInModule(ctx context.Context, pattern string, m module.Version, tags map[string]bool) *search.Match { +func MatchInModule(loaderstate *State, ctx context.Context, pattern string, m module.Version, tags map[string]bool) *search.Match { match := search.NewMatch(pattern) if m == (module.Version{}) { - matchPackages(ctx, match, tags, includeStd, nil) + matchPackages(loaderstate, ctx, match, tags, includeStd, nil) } - LoadModFile(ctx) // Sets Target, needed by fetch and matchPackages. + LoadModFile(loaderstate, ctx) // Sets Target, needed by fetch and matchPackages. if !match.IsLiteral() { - matchPackages(ctx, match, tags, omitStd, []module.Version{m}) + matchPackages(loaderstate, ctx, match, tags, omitStd, []module.Version{m}) return match } - root, isLocal, err := fetch(ctx, m) + root, isLocal, err := fetch(loaderstate, ctx, m) if err != nil { match.Errs = []error{err} return match @@ -322,7 +322,7 @@ func MatchInModule(ctx context.Context, pattern string, m module.Version, tags m // parseIgnorePatterns collects all ignore patterns associated with the // provided list of modules. 
// It returns a map of module root -> *search.IgnorePatterns. -func parseIgnorePatterns(ctx context.Context, treeCanMatch func(string) bool, modules []module.Version) map[string]*search.IgnorePatterns { +func parseIgnorePatterns(loaderstate *State, ctx context.Context, treeCanMatch func(string) bool, modules []module.Version) map[string]*search.IgnorePatterns { ignorePatternsMap := make(map[string]*search.IgnorePatterns) for _, mod := range modules { if gover.IsToolchain(mod.Path) || !treeCanMatch(mod.Path) { @@ -330,12 +330,12 @@ func parseIgnorePatterns(ctx context.Context, treeCanMatch func(string) bool, mo } var modRoot string var ignorePatterns []string - if MainModules.Contains(mod.Path) { - modRoot = MainModules.ModRoot(mod) + if loaderstate.MainModules.Contains(mod.Path) { + modRoot = loaderstate.MainModules.ModRoot(mod) if modRoot == "" { continue } - modIndex := MainModules.Index(mod) + modIndex := loaderstate.MainModules.Index(mod) if modIndex == nil { continue } @@ -344,11 +344,11 @@ func parseIgnorePatterns(ctx context.Context, treeCanMatch func(string) bool, mo // Skip getting ignore patterns for vendored modules because they // do not have go.mod files. var err error - modRoot, _, err = fetch(ctx, mod) + modRoot, _, err = fetch(loaderstate, ctx, mod) if err != nil { continue } - summary, err := goModSummary(mod) + summary, err := goModSummary(loaderstate, mod) if err != nil { continue } diff --git a/src/cmd/go/internal/modload/vendor.go b/src/cmd/go/internal/modload/vendor.go index c7fe7319358..9956bcdb127 100644 --- a/src/cmd/go/internal/modload/vendor.go +++ b/src/cmd/go/internal/modload/vendor.go @@ -140,10 +140,10 @@ func readVendorList(vendorDir string) { // checkVendorConsistency verifies that the vendor/modules.txt file matches (if // go 1.14) or at least does not contradict (go 1.13 or earlier) the // requirements and replacements listed in the main module's go.mod file. -func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, modRoots []string) { +func checkVendorConsistency(loaderstate *State, indexes []*modFileIndex, modFiles []*modfile.File, modRoots []string) { // readVendorList only needs the main module to get the directory // the vendor directory is in. - readVendorList(VendorDir()) + readVendorList(VendorDir(loaderstate)) if len(modFiles) < 1 { // We should never get here if there are zero modfiles. Either @@ -154,7 +154,7 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m } pre114 := false - if !inWorkspaceMode() { // workspace mode was added after Go 1.14 + if !loaderstate.inWorkspaceMode() { // workspace mode was added after Go 1.14 if len(indexes) != 1 { panic(fmt.Errorf("not in workspace mode but number of indexes is %v, not 1", len(indexes))) } @@ -188,7 +188,7 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m // However, we can at least detect a version mismatch if packages were // vendored from a non-matching version. 
if vv, ok := vendorVersion[r.Mod.Path]; ok && vv != r.Mod.Version { - vendErrorf(r.Mod, fmt.Sprintf("is explicitly required in go.mod, but vendor/modules.txt indicates %s@%s", r.Mod.Path, vv)) + vendErrorf(r.Mod, "is explicitly required in go.mod, but vendor/modules.txt indicates %s@%s", r.Mod.Path, vv) } } else { vendErrorf(r.Mod, "is explicitly required in go.mod, but not marked as explicit in vendor/modules.txt") @@ -215,8 +215,8 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m continue // Don't print the same error more than once } seenrep[r.Old] = true - rNew, modRoot, replacementSource := replacementFrom(r.Old) - rNewCanonical := canonicalizeReplacePath(rNew, modRoot) + rNew, modRoot, replacementSource := replacementFrom(loaderstate, r.Old) + rNewCanonical := canonicalizeReplacePath(loaderstate, rNew, modRoot) vr := vendorMeta[r.Old].Replacement if vr == (module.Version{}) { if rNewCanonical == (module.Version{}) { @@ -236,8 +236,8 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m for _, modFile := range modFiles { checkReplace(modFile.Replace) } - if MainModules.workFile != nil { - checkReplace(MainModules.workFile.Replace) + if loaderstate.MainModules.workFile != nil { + checkReplace(loaderstate.MainModules.workFile.Replace) } for _, mod := range vendorList { @@ -252,7 +252,7 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m } if !foundRequire { article := "" - if inWorkspaceMode() { + if loaderstate.inWorkspaceMode() { article = "a " } vendErrorf(mod, "is marked as explicit in vendor/modules.txt, but not explicitly required in %vgo.mod", article) @@ -262,9 +262,9 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m } for _, mod := range vendorReplaced { - r := Replacement(mod) + r := Replacement(loaderstate, mod) replacementSource := "go.mod" - if inWorkspaceMode() { + if loaderstate.inWorkspaceMode() { replacementSource = "the workspace" } if r == (module.Version{}) { @@ -276,9 +276,9 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m if vendErrors.Len() > 0 { subcmd := "mod" - if inWorkspaceMode() { + if loaderstate.inWorkspaceMode() { subcmd = "work" } - base.Fatalf("go: inconsistent vendoring in %s:%s\n\n\tTo ignore the vendor directory, use -mod=readonly or -mod=mod.\n\tTo sync the vendor directory, run:\n\t\tgo %s vendor", filepath.Dir(VendorDir()), vendErrors, subcmd) + base.Fatalf("go: inconsistent vendoring in %s:%s\n\n\tTo ignore the vendor directory, use -mod=readonly or -mod=mod.\n\tTo sync the vendor directory, run:\n\t\tgo %s vendor", filepath.Dir(VendorDir(loaderstate)), vendErrors, subcmd) } } diff --git a/src/cmd/go/internal/run/run.go b/src/cmd/go/internal/run/run.go index b81b1a007bd..ebd99ccfb21 100644 --- a/src/cmd/go/internal/run/run.go +++ b/src/cmd/go/internal/run/run.go @@ -71,21 +71,22 @@ func init() { } func runRun(ctx context.Context, cmd *base.Command, args []string) { + moduleLoaderState := modload.NewState() if shouldUseOutsideModuleMode(args) { // Set global module flags for 'go run cmd@version'. // This must be done before modload.Init, but we need to call work.BuildInit // before loading packages, since it affects package locations, e.g., // for -race and -msan. 
- modload.ForceUseModules = true - modload.RootMode = modload.NoRoot - modload.AllowMissingModuleImports() - modload.Init() + moduleLoaderState.ForceUseModules = true + moduleLoaderState.RootMode = modload.NoRoot + moduleLoaderState.AllowMissingModuleImports() + modload.Init(moduleLoaderState) } else { - modload.InitWorkfile() + moduleLoaderState.InitWorkfile() } - work.BuildInit() - b := work.NewBuilder("") + work.BuildInit(moduleLoaderState) + b := work.NewBuilder("", moduleLoaderState.VendorDirOrEmpty) defer func() { if err := b.Close(); err != nil { base.Fatal(err) @@ -107,18 +108,18 @@ func runRun(ctx context.Context, cmd *base.Command, args []string) { base.Fatalf("go: cannot run *_test.go files (%s)", file) } } - p = load.GoFilesPackage(ctx, pkgOpts, files) + p = load.GoFilesPackage(moduleLoaderState, ctx, pkgOpts, files) } else if len(args) > 0 && !strings.HasPrefix(args[0], "-") { arg := args[0] var pkgs []*load.Package if strings.Contains(arg, "@") && !build.IsLocalImport(arg) && !filepath.IsAbs(arg) { var err error - pkgs, err = load.PackagesAndErrorsOutsideModule(ctx, pkgOpts, args[:1]) + pkgs, err = load.PackagesAndErrorsOutsideModule(moduleLoaderState, ctx, pkgOpts, args[:1]) if err != nil { base.Fatal(err) } } else { - pkgs = load.PackagesAndErrors(ctx, pkgOpts, args[:1]) + pkgs = load.PackagesAndErrors(moduleLoaderState, ctx, pkgOpts, args[:1]) } if len(pkgs) == 0 { @@ -140,7 +141,7 @@ func runRun(ctx context.Context, cmd *base.Command, args []string) { load.CheckPackageErrors([]*load.Package{p}) if cfg.BuildCover { - load.PrepareForCoverageBuild([]*load.Package{p}) + load.PrepareForCoverageBuild(moduleLoaderState, []*load.Package{p}) } p.Internal.OmitDebug = true @@ -166,7 +167,7 @@ func runRun(ctx context.Context, cmd *base.Command, args []string) { p.Internal.ExeName = p.DefaultExecName() } - a1 := b.LinkAction(work.ModeBuild, work.ModeBuild, p) + a1 := b.LinkAction(moduleLoaderState, work.ModeBuild, work.ModeBuild, p) a1.CacheExecutable = true a := &work.Action{Mode: "go run", Actor: work.ActorFunc(buildRunProgram), Args: cmdArgs, Deps: []*work.Action{a1}} b.Do(ctx, a) diff --git a/src/cmd/go/internal/telemetrystats/telemetrystats.go b/src/cmd/go/internal/telemetrystats/telemetrystats.go index d5b642240f1..81a6e1e1758 100644 --- a/src/cmd/go/internal/telemetrystats/telemetrystats.go +++ b/src/cmd/go/internal/telemetrystats/telemetrystats.go @@ -22,13 +22,23 @@ func Increment() { // incrementConfig increments counters for the configuration // the command is running in. 
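incrementConfig, below, buckets each invocation into exactly one mode counter: GOPATH mode when modules will not be enabled, workspace mode when a go.work file is found, and module mode otherwise; this CL also adds a parallel cgo enabled/disabled counter. A sketch of the decision tree, with modulesEnabled and goWorkFile as hypothetical stand-ins for the real s.WillBeEnabled() and s.FindGoWork(base.Cwd()) queries:

    package main

    import "fmt"

    // classifyMode mirrors the branch order in incrementConfig: the
    // GOPATH check wins, then workspace, then plain module mode.
    func classifyMode(modulesEnabled bool, goWorkFile string) string {
        switch {
        case !modulesEnabled:
            return "go/mode:gopath"
        case goWorkFile != "":
            return "go/mode:workspace"
        default:
            return "go/mode:module"
        }
    }

    func main() {
        fmt.Println(classifyMode(false, ""))       // go/mode:gopath
        fmt.Println(classifyMode(true, "go.work")) // go/mode:workspace
        fmt.Println(classifyMode(true, ""))        // go/mode:module
    }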
func incrementConfig() { - if !modload.WillBeEnabled() { + // TODO(jitsu): Telemetry for the go/mode counters should eventually be + // moved to modload.Init() + s := modload.NewState() + if !s.WillBeEnabled() { counter.Inc("go/mode:gopath") - } else if workfile := modload.FindGoWork(base.Cwd()); workfile != "" { + } else if workfile := s.FindGoWork(base.Cwd()); workfile != "" { counter.Inc("go/mode:workspace") } else { counter.Inc("go/mode:module") } + + if cfg.BuildContext.CgoEnabled { + counter.Inc("go/cgo:enabled") + } else { + counter.Inc("go/cgo:disabled") + } + counter.Inc("go/platform/target/goos:" + cfg.Goos) counter.Inc("go/platform/target/goarch:" + cfg.Goarch) switch cfg.Goarch { diff --git a/src/cmd/go/internal/test/flagdefs.go b/src/cmd/go/internal/test/flagdefs.go index 8aa0bfc2bf3..b8b4bf649e4 100644 --- a/src/cmd/go/internal/test/flagdefs.go +++ b/src/cmd/go/internal/test/flagdefs.go @@ -9,6 +9,7 @@ package test // passFlagToTest contains the flags that should be forwarded to // the test binary with the prefix "test.". var passFlagToTest = map[string]bool{ + "artifacts": true, "bench": true, "benchmem": true, "benchtime": true, diff --git a/src/cmd/go/internal/test/test.go b/src/cmd/go/internal/test/test.go index 8bfb3c149b6..b30b2abc0e9 100644 --- a/src/cmd/go/internal/test/test.go +++ b/src/cmd/go/internal/test/test.go @@ -162,7 +162,7 @@ In addition to the build flags, the flags handled by 'go test' itself are: Also emits build output in JSON. See 'go help buildjson'. -o file - Compile the test binary to the named file. + Save a copy of the test binary to the named file. The test still runs (unless -c or -i is specified). If file ends in a slash or names an existing directory, the test is written to pkg.test in that directory. @@ -192,6 +192,10 @@ and -show_bytes options of pprof control how the information is presented. The following flags are recognized by the 'go test' command and control the execution of any test: + -artifacts + Save test artifacts in the directory specified by -outputdir. + See 'go doc testing.T.ArtifactDir'. + -bench regexp Run only those benchmarks matching a regular expression. By default, no benchmarks are run. @@ -286,6 +290,10 @@ control the execution of any test: This will only list top-level tests. No subtest or subbenchmarks will be shown. + -outputdir directory + Place output files from profiling and test artifacts in the + specified directory, by default the directory in which "go test" is running. + -parallel n Allow parallel execution of test functions that call t.Parallel, and fuzz targets that call t.Parallel when running the seed corpus. @@ -397,10 +405,6 @@ profile the tests during execution: Sample 1 in n stack traces of goroutines holding a contended mutex. - -outputdir directory - Place output files from profiling in the specified directory, - by default the directory in which "go test" is running. - -trace trace.out Write an execution trace to the specified file before exiting. @@ -540,6 +544,7 @@ See the documentation of the testing package for more information. } var ( + testArtifacts bool // -artifacts flag testBench string // -bench flag testC bool // -c flag testCoverPkgs []*load.Package // -coverpkg flag @@ -677,8 +682,9 @@ var defaultVetFlags = []string{ } func runTest(ctx context.Context, cmd *base.Command, args []string) { + moduleLoaderState := modload.NewState() pkgArgs, testArgs = testFlags(args) - modload.InitWorkfile() // The test command does custom flag processing; initialize workspaces after that. 
+ moduleLoaderState.InitWorkfile() // The test command does custom flag processing; initialize workspaces after that. if cfg.DebugTrace != "" { var close func() error @@ -699,12 +705,13 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) { work.FindExecCmd() // initialize cached result - work.BuildInit() + work.BuildInit(moduleLoaderState) work.VetFlags = testVet.flags work.VetExplicit = testVet.explicit + work.VetTool = base.Tool("vet") pkgOpts := load.PackageOpts{ModResolveTests: true} - pkgs = load.PackagesAndErrors(ctx, pkgOpts, pkgArgs) + pkgs = load.PackagesAndErrors(moduleLoaderState, ctx, pkgOpts, pkgArgs) // We *don't* call load.CheckPackageErrors here because we want to report // loading errors as per-package test setup errors later. if len(pkgs) == 0 { @@ -730,12 +737,12 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) { // the module cache (or permanently alter the behavior of std tests for all // users) by writing the failing input to the package's testdata directory. // (See https://golang.org/issue/48495 and test_fuzz_modcache.txt.) - mainMods := modload.MainModules + mainMods := moduleLoaderState.MainModules if m := pkgs[0].Module; m != nil && m.Path != "" { if !mainMods.Contains(m.Path) { base.Fatalf("cannot use -fuzz flag on package outside the main module") } - } else if pkgs[0].Standard && modload.Enabled() { + } else if pkgs[0].Standard && moduleLoaderState.Enabled() { // Because packages in 'std' and 'cmd' are part of the standard library, // they are only treated as part of a module in 'go mod' subcommands and // 'go get'. However, we still don't want to accidentally corrupt their @@ -848,7 +855,7 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) { } } - b := work.NewBuilder("") + b := work.NewBuilder("", moduleLoaderState.VendorDirOrEmpty) defer func() { if err := b.Close(); err != nil { base.Fatal(err) @@ -859,15 +866,15 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) { var writeCoverMetaAct *work.Action if cfg.BuildCoverPkg != nil { - match := make([]func(*load.Package) bool, len(cfg.BuildCoverPkg)) + match := make([]func(*modload.State, *load.Package) bool, len(cfg.BuildCoverPkg)) for i := range cfg.BuildCoverPkg { match[i] = load.MatchPackage(cfg.BuildCoverPkg[i], base.Cwd()) } // Select for coverage all dependencies matching the -coverpkg // patterns. - plist := load.TestPackageList(ctx, pkgOpts, pkgs) - testCoverPkgs = load.SelectCoverPackages(plist, match, "test") + plist := load.TestPackageList(moduleLoaderState, ctx, pkgOpts, pkgs) + testCoverPkgs = load.SelectCoverPackages(moduleLoaderState, plist, match, "test") if len(testCoverPkgs) > 0 { // create a new singleton action that will collect up the // meta-data files from all of the packages mentioned in @@ -945,7 +952,7 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) { "testing": true, "time": true, } - for _, p := range load.TestPackageList(ctx, pkgOpts, pkgs) { + for _, p := range load.TestPackageList(moduleLoaderState, ctx, pkgOpts, pkgs) { if !skipInstrumentation[p.ImportPath] { p.Internal.FuzzInstrument = true } @@ -975,7 +982,7 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) { // happens we'll wind up building the Q compile action // before updating its deps to include sync/atomic). 
if cfg.BuildCoverMode == "atomic" && p.ImportPath != "sync/atomic" { - load.EnsureImport(p, "sync/atomic") + load.EnsureImport(moduleLoaderState, p, "sync/atomic") } // Tag the package for static meta-data generation if no // test files (this works only with the new coverage @@ -1042,7 +1049,7 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) { reportSetupFailed(firstErrPkg, firstErrPkg.Error) continue } - buildTest, runTest, printTest, perr, err := builderTest(b, ctx, pkgOpts, p, allImports[p], writeCoverMetaAct) + buildTest, runTest, printTest, perr, err := builderTest(moduleLoaderState, b, ctx, pkgOpts, p, allImports[p], writeCoverMetaAct) if err != nil { reportErr(perr, err) reportSetupFailed(perr, err) @@ -1123,7 +1130,7 @@ var windowsBadWords = []string{ "update", } -func builderTest(b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts, p *load.Package, imported bool, writeCoverMetaAct *work.Action) (buildAction, runAction, printAction *work.Action, perr *load.Package, err error) { +func builderTest(loaderstate *modload.State, b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts, p *load.Package, imported bool, writeCoverMetaAct *work.Action) (buildAction, runAction, printAction *work.Action, perr *load.Package, err error) { if len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 { build := b.CompileAction(work.ModeBuild, work.ModeBuild, p) run := &work.Action{ @@ -1151,7 +1158,7 @@ func builderTest(b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts, run.Deps = append(run.Deps, writeCoverMetaAct) writeCoverMetaAct.Deps = append(writeCoverMetaAct.Deps, build) } - addTestVet(b, p, run, nil) + addTestVet(loaderstate, b, p, run, nil) print := &work.Action{ Mode: "test print", Actor: work.ActorFunc(builderPrintTest), @@ -1175,7 +1182,7 @@ func builderTest(b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts, Paths: cfg.BuildCoverPkg, } } - pmain, ptest, pxtest, perr := load.TestPackagesFor(ctx, pkgOpts, p, cover) + pmain, ptest, pxtest, perr := load.TestPackagesFor(loaderstate, ctx, pkgOpts, p, cover) if perr != nil { return nil, nil, nil, perr, perr.Error } @@ -1215,7 +1222,7 @@ func builderTest(b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts, } } - a := b.LinkAction(work.ModeBuild, work.ModeBuild, pmain) + a := b.LinkAction(loaderstate, work.ModeBuild, work.ModeBuild, pmain) a.Target = testDir + testBinary + cfg.ExeSuffix if cfg.Goos == "windows" { // There are many reserved words on Windows that, @@ -1341,10 +1348,10 @@ func builderTest(b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts, } if len(ptest.GoFiles)+len(ptest.CgoFiles) > 0 { - addTestVet(b, ptest, vetRunAction, installAction) + addTestVet(loaderstate, b, ptest, vetRunAction, installAction) } if pxtest != nil { - addTestVet(b, pxtest, vetRunAction, installAction) + addTestVet(loaderstate, b, pxtest, vetRunAction, installAction) } if installAction != nil { @@ -1359,12 +1366,12 @@ func builderTest(b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts, return buildAction, runAction, printAction, nil, nil } -func addTestVet(b *work.Builder, p *load.Package, runAction, installAction *work.Action) { +func addTestVet(loaderstate *modload.State, b *work.Builder, p *load.Package, runAction, installAction *work.Action) { if testVet.off { return } - vet := b.VetAction(work.ModeBuild, work.ModeBuild, p) + vet := b.VetAction(loaderstate, work.ModeBuild, work.ModeBuild, p) runAction.Deps = append(runAction.Deps, vet) // Install will clean the build 
directory. // Make sure vet runs first. @@ -1735,8 +1742,7 @@ func (r *runTestActor) Act(b *work.Builder, ctx context.Context, a *work.Action) } else if errors.Is(err, exec.ErrWaitDelay) { fmt.Fprintf(cmd.Stdout, "*** Test I/O incomplete %v after exiting.\n", cmd.WaitDelay) } - var ee *exec.ExitError - if len(out) == 0 || !errors.As(err, &ee) || !ee.Exited() { + if ee, ok := errors.AsType[*exec.ExitError](err); !ok || !ee.Exited() || len(out) == 0 { // If there was no test output, print the exit status so that the reason // for failure is clear. fmt.Fprintf(cmd.Stdout, "%s\n", err) diff --git a/src/cmd/go/internal/test/testflag.go b/src/cmd/go/internal/test/testflag.go index 983e8f56e9a..d6891a1d0b9 100644 --- a/src/cmd/go/internal/test/testflag.go +++ b/src/cmd/go/internal/test/testflag.go @@ -44,6 +44,7 @@ func init() { // some of them so that cmd/go knows what to do with the test output, or knows // to build the test in a way that supports the use of the flag. + cf.BoolVar(&testArtifacts, "artifacts", false, "") cf.StringVar(&testBench, "bench", "", "") cf.Bool("benchmem", false, "") cf.String("benchtime", "", "") @@ -260,7 +261,7 @@ func testFlags(args []string) (packageNames, passToTest []string) { break } - if nf := (cmdflag.NonFlagError{}); errors.As(err, &nf) { + if nf, ok := errors.AsType[cmdflag.NonFlagError](err); ok { if !inPkgList && packageNames != nil { // We already saw the package list previously, and this argument is not // a flag, so it — and everything after it — must be either a value for @@ -295,7 +296,7 @@ func testFlags(args []string) (packageNames, passToTest []string) { inPkgList = false } - if nd := (cmdflag.FlagNotDefinedError{}); errors.As(err, &nd) { + if nd, ok := errors.AsType[cmdflag.FlagNotDefinedError](err); ok { // This is a flag we do not know. We must assume that any args we see // after this might be flag arguments, not package names, so make // packageNames non-nil to indicate that the package list is complete. @@ -392,7 +393,8 @@ func testFlags(args []string) (packageNames, passToTest []string) { // directory, but 'go test' defaults it to the working directory of the 'go' // command. Set it explicitly if it is needed due to some other flag that // requests output. - if testProfile() != "" && !outputDirSet { + needOutputDir := testProfile() != "" || testArtifacts + if needOutputDir && !outputDirSet { injectedFlags = append(injectedFlags, "-test.outputdir="+testOutputDir.getAbs()) } diff --git a/src/cmd/go/internal/tool/tool.go b/src/cmd/go/internal/tool/tool.go index 120ef5339be..92e8a803105 100644 --- a/src/cmd/go/internal/tool/tool.go +++ b/src/cmd/go/internal/tool/tool.go @@ -78,9 +78,10 @@ func init() { } func runTool(ctx context.Context, cmd *base.Command, args []string) { + moduleLoaderState := modload.NewState() if len(args) == 0 { counter.Inc("go/subcommand:tool") - listTools(ctx) + listTools(moduleLoaderState, ctx) return } toolName := args[0] @@ -108,14 +109,14 @@ func runTool(ctx context.Context, cmd *base.Command, args []string) { if tool := loadBuiltinTool(toolName); tool != "" { // Increment a counter for the tool subcommand with the tool name. counter.Inc("go/subcommand:tool-" + toolName) - buildAndRunBuiltinTool(ctx, toolName, tool, args[1:]) + buildAndRunBuiltinTool(moduleLoaderState, ctx, toolName, tool, args[1:]) return } // Try to build and run mod tool. 
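Several hunks in this file and in testflag.go replace the two-step errors.As pattern with the generic errors.AsType, whose signature, as used throughout this diff, is roughly func AsType[T error](err error) (T, bool). The rewrite in miniature:

package errsketch

import (
	"errors"
	"os/exec"
)

// Old pattern: declare a typed variable and pass its address.
func exitedOld(err error) bool {
	var ee *exec.ExitError
	return errors.As(err, &ee) && ee.Exited()
}

// New pattern used by this change: value and ok in one expression.
func exitedNew(err error) bool {
	ee, ok := errors.AsType[*exec.ExitError](err)
	return ok && ee.Exited()
}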
- tool := loadModTool(ctx, toolName) + tool := loadModTool(moduleLoaderState, ctx, toolName) if tool != "" { - buildAndRunModtool(ctx, toolName, tool, args[1:]) + buildAndRunModtool(moduleLoaderState, ctx, toolName, tool, args[1:]) return } @@ -132,7 +133,7 @@ func runTool(ctx context.Context, cmd *base.Command, args []string) { } // listTools prints a list of the available tools in the tools directory. -func listTools(ctx context.Context) { +func listTools(loaderstate *modload.State, ctx context.Context) { f, err := os.Open(build.ToolDir) if err != nil { fmt.Fprintf(os.Stderr, "go: no tool directory: %s\n", err) @@ -161,9 +162,9 @@ func listTools(ctx context.Context) { fmt.Println(name) } - modload.InitWorkfile() - modload.LoadModFile(ctx) - modTools := slices.Sorted(maps.Keys(modload.MainModules.Tools())) + loaderstate.InitWorkfile() + modload.LoadModFile(loaderstate, ctx) + modTools := slices.Sorted(maps.Keys(loaderstate.MainModules.Tools())) for _, tool := range modTools { fmt.Println(tool) } @@ -251,12 +252,12 @@ func loadBuiltinTool(toolName string) string { return cmdTool } -func loadModTool(ctx context.Context, name string) string { - modload.InitWorkfile() - modload.LoadModFile(ctx) +func loadModTool(loaderstate *modload.State, ctx context.Context, name string) string { + loaderstate.InitWorkfile() + modload.LoadModFile(loaderstate, ctx) matches := []string{} - for tool := range modload.MainModules.Tools() { + for tool := range loaderstate.MainModules.Tools() { if tool == name || defaultExecName(tool) == name { matches = append(matches, tool) } @@ -300,7 +301,7 @@ func builtTool(runAction *work.Action) string { return linkAction.BuiltTarget() } -func buildAndRunBuiltinTool(ctx context.Context, toolName, tool string, args []string) { +func buildAndRunBuiltinTool(loaderstate *modload.State, ctx context.Context, toolName, tool string, args []string) { // Override GOOS and GOARCH for the build to build the tool using // the same GOOS and GOARCH as this go command. cfg.ForceHost() @@ -308,17 +309,17 @@ func buildAndRunBuiltinTool(ctx context.Context, toolName, tool string, args []s // Ignore go.mod and go.work: we don't need them, and we want to be able // to run the tool even if there's an issue with the module or workspace the // user happens to be in. - modload.RootMode = modload.NoRoot + loaderstate.RootMode = modload.NoRoot runFunc := func(b *work.Builder, ctx context.Context, a *work.Action) error { cmdline := str.StringList(builtTool(a), a.Args) return runBuiltTool(toolName, nil, cmdline) } - buildAndRunTool(ctx, tool, args, runFunc) + buildAndRunTool(loaderstate, ctx, tool, args, runFunc) } -func buildAndRunModtool(ctx context.Context, toolName, tool string, args []string) { +func buildAndRunModtool(loaderstate *modload.State, ctx context.Context, toolName, tool string, args []string) { runFunc := func(b *work.Builder, ctx context.Context, a *work.Action) error { // Use the ExecCmd to run the binary, as go run does. 
ExecCmd allows users // to provide a runner to run the binary, for example a simulator for binaries @@ -332,12 +333,12 @@ func buildAndRunModtool(ctx context.Context, toolName, tool string, args []strin return runBuiltTool(toolName, env, cmdline) } - buildAndRunTool(ctx, tool, args, runFunc) + buildAndRunTool(loaderstate, ctx, tool, args, runFunc) } -func buildAndRunTool(ctx context.Context, tool string, args []string, runTool work.ActorFunc) { - work.BuildInit() - b := work.NewBuilder("") +func buildAndRunTool(loaderstate *modload.State, ctx context.Context, tool string, args []string, runTool work.ActorFunc) { + work.BuildInit(loaderstate) + b := work.NewBuilder("", loaderstate.VendorDirOrEmpty) defer func() { if err := b.Close(); err != nil { base.Fatal(err) @@ -345,11 +346,11 @@ func buildAndRunTool(ctx context.Context, tool string, args []string, runTool wo }() pkgOpts := load.PackageOpts{MainOnly: true} - p := load.PackagesAndErrors(ctx, pkgOpts, []string{tool})[0] + p := load.PackagesAndErrors(loaderstate, ctx, pkgOpts, []string{tool})[0] p.Internal.OmitDebug = true p.Internal.ExeName = p.DefaultExecName() - a1 := b.LinkAction(work.ModeBuild, work.ModeBuild, p) + a1 := b.LinkAction(loaderstate, work.ModeBuild, work.ModeBuild, p) a1.CacheExecutable = true a := &work.Action{Mode: "go tool", Actor: runTool, Args: args, Deps: []*work.Action{a1}} b.Do(ctx, a) diff --git a/src/cmd/go/internal/toolchain/select.go b/src/cmd/go/internal/toolchain/select.go index e8712613366..4c7e7a5e576 100644 --- a/src/cmd/go/internal/toolchain/select.go +++ b/src/cmd/go/internal/toolchain/select.go @@ -95,10 +95,11 @@ var toolchainTrace = godebug.New("#toolchaintrace").Value() == "1" // It must be called early in startup. // See https://go.dev/doc/toolchain#select. func Select() { + moduleLoaderState := modload.NewState() log.SetPrefix("go: ") defer log.SetPrefix("") - if !modload.WillBeEnabled() { + if !moduleLoaderState.WillBeEnabled() { return } @@ -171,7 +172,7 @@ func Select() { gotoolchain = minToolchain if mode == "auto" || mode == "path" { // Read go.mod to find new minimum and suggested toolchain. - file, goVers, toolchain := modGoToolchain() + file, goVers, toolchain := modGoToolchain(moduleLoaderState) gover.Startup.AutoFile = file if toolchain == "default" { // "default" means always use the default toolchain, @@ -231,7 +232,7 @@ func Select() { } } } - maybeSwitchForGoInstallVersion(minVers) + maybeSwitchForGoInstallVersion(moduleLoaderState, minVers) } // If we are invoked as a target toolchain, confirm that @@ -283,7 +284,8 @@ func Select() { } counterSelectExec.Inc() - Exec(gotoolchain) + Exec(moduleLoaderState, gotoolchain) + panic("unreachable") } var counterSelectExec = counter.New("go/toolchain/select-exec") @@ -300,7 +302,7 @@ var TestVersionSwitch string // If $GOTOOLCHAIN is set to path or min+path, Exec only considers the PATH // as a source of Go toolchains. Otherwise Exec tries the PATH but then downloads // a toolchain if necessary. -func Exec(gotoolchain string) { +func Exec(s *modload.State, gotoolchain string) { log.SetPrefix("go: ") writeBits = sysWriteBits() @@ -351,12 +353,6 @@ func Exec(gotoolchain string) { base.Fatalf("cannot find %q in PATH", gotoolchain) } - // Set up modules without an explicit go.mod, to download distribution. - modload.Reset() - modload.ForceUseModules = true - modload.RootMode = modload.NoRoot - modload.Init() - // Download and unpack toolchain module into module cache. 
// Note that multiple go commands might be doing this at the same time, // and that's OK: the module cache handles that case correctly. @@ -527,9 +523,9 @@ func raceSafeCopy(old, new string) error { // modGoToolchain finds the enclosing go.work or go.mod file // and returns the go version and toolchain lines from the file. // The toolchain line overrides the version line -func modGoToolchain() (file, goVers, toolchain string) { +func modGoToolchain(loaderstate *modload.State) (file, goVers, toolchain string) { wd := base.UncachedCwd() - file = modload.FindGoWork(wd) + file = loaderstate.FindGoWork(wd) // $GOWORK can be set to a file that does not yet exist, if we are running 'go work init'. // Do not try to load the file in that case if _, err := os.Stat(file); err != nil { @@ -551,7 +547,7 @@ func modGoToolchain() (file, goVers, toolchain string) { // maybeSwitchForGoInstallVersion reports whether the command line is go install m@v or go run m@v. // If so, switch to the go version required to build m@v if it's higher than minVers. -func maybeSwitchForGoInstallVersion(minVers string) { +func maybeSwitchForGoInstallVersion(loaderstate *modload.State, minVers string) { // Note: We assume there are no flags between 'go' and 'install' or 'run'. // During testing there are some debugging flags that are accepted // in that position, but in production go binaries there are not. @@ -692,27 +688,27 @@ func maybeSwitchForGoInstallVersion(minVers string) { // command lines if we add new flags in the future. // Set up modules without an explicit go.mod, to download go.mod. - modload.ForceUseModules = true - modload.RootMode = modload.NoRoot - modload.Init() - defer modload.Reset() + loaderstate.ForceUseModules = true + loaderstate.RootMode = modload.NoRoot + modload.Init(loaderstate) + defer loaderstate.Reset() // See internal/load.PackagesAndErrorsOutsideModule ctx := context.Background() - allowed := modload.CheckAllowed + allowed := loaderstate.CheckAllowed if modload.IsRevisionQuery(path, version) { // Don't check for retractions if a specific revision is requested. allowed = nil } noneSelected := func(path string) (version string) { return "none" } - _, err := modload.QueryPackages(ctx, path, version, noneSelected, allowed) + _, err := modload.QueryPackages(loaderstate, ctx, path, version, noneSelected, allowed) if errors.Is(err, gover.ErrTooNew) { // Run early switch, same one go install or go run would eventually do, // if it understood all the command-line flags. - var s Switcher + s := NewSwitcher(loaderstate) s.Error(err) if s.TooNew != nil && gover.Compare(s.TooNew.GoVersion, minVers) > 0 { - SwitchOrFatal(ctx, err) + SwitchOrFatal(loaderstate, ctx, err) } } } diff --git a/src/cmd/go/internal/toolchain/switch.go b/src/cmd/go/internal/toolchain/switch.go index 37c1bcdcbec..76b608fdef4 100644 --- a/src/cmd/go/internal/toolchain/switch.go +++ b/src/cmd/go/internal/toolchain/switch.go @@ -16,6 +16,7 @@ import ( "cmd/go/internal/cfg" "cmd/go/internal/gover" "cmd/go/internal/modfetch" + "cmd/go/internal/modload" "cmd/internal/telemetry/counter" ) @@ -31,8 +32,15 @@ import ( // // See https://go.dev/doc/toolchain#switch. 
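The signature churn above (Exec, modGoToolchain, maybeSwitchForGoInstallVersion) all serves one refactoring: package-level module-loader state becomes an explicit *modload.State, created per command with modload.NewState and threaded down every call chain, as in the Switcher below, which temporarily carries the state as a field. A toy reduction of the before and after, with invented names; the real modload.State carries far more:

package statesketch

// Before: package-level knobs that every caller mutates and reads.
//
//	var ForceUseModules bool
//	var RootMode int
//	func Init() { /* reads the globals */ }

// After: the knobs live on a State value owned by the caller.
type State struct {
	ForceUseModules bool
	RootMode        int
}

func NewState() *State { return &State{} }

// Init now takes the state explicitly, so two commands (or tests)
// can no longer interfere with each other through shared globals.
func Init(s *State) {
	if s.ForceUseModules {
		// ...
	}
}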
type Switcher struct { - TooNew *gover.TooNewError // max go requirement observed - Errors []error // errors collected so far + TooNew *gover.TooNewError // max go requirement observed + Errors []error // errors collected so far + loaderstate *modload.State // temporarily here while we eliminate global module loader state +} + +func NewSwitcher(s *modload.State) *Switcher { + sw := new(Switcher) + sw.loaderstate = s + return sw } // Error reports the error to the Switcher, @@ -100,7 +108,7 @@ func (s *Switcher) Switch(ctx context.Context) { fmt.Fprintf(os.Stderr, "go: %v requires go >= %v; switching to %v\n", s.TooNew.What, s.TooNew.GoVersion, tv) counterSwitchExec.Inc() - Exec(tv) + Exec(s.loaderstate, tv) panic("unreachable") } @@ -108,8 +116,8 @@ var counterSwitchExec = counter.New("go/toolchain/switch-exec") // SwitchOrFatal attempts a toolchain switch based on the information in err // and otherwise falls back to base.Fatal(err). -func SwitchOrFatal(ctx context.Context, err error) { - var s Switcher +func SwitchOrFatal(loaderstate *modload.State, ctx context.Context, err error) { + s := NewSwitcher(loaderstate) s.Error(err) s.Switch(ctx) base.Exit() diff --git a/src/cmd/go/internal/vcs/vcs.go b/src/cmd/go/internal/vcs/vcs.go index edbc5734401..7c198c5f2b4 100644 --- a/src/cmd/go/internal/vcs/vcs.go +++ b/src/cmd/go/internal/vcs/vcs.go @@ -237,7 +237,7 @@ func parseRevTime(out []byte) (string, time.Time, error) { } rev := buf[:i] - secs, err := strconv.ParseInt(string(buf[i+1:]), 10, 64) + secs, err := strconv.ParseInt(buf[i+1:], 10, 64) if err != nil { return "", time.Time{}, fmt.Errorf("unrecognized VCS tool output: %v", err) } diff --git a/src/cmd/go/internal/vcweb/hg.go b/src/cmd/go/internal/vcweb/hg.go index 4571277c9f1..fb77d1a2fcc 100644 --- a/src/cmd/go/internal/vcweb/hg.go +++ b/src/cmd/go/internal/vcweb/hg.go @@ -25,6 +25,13 @@ type hgHandler struct { once sync.Once hgPath string hgPathErr error + + mu sync.Mutex + wg sync.WaitGroup + ctx context.Context + cancel func() + cmds []*exec.Cmd + url map[string]*url.URL } func (h *hgHandler) Available() bool { @@ -34,6 +41,30 @@ func (h *hgHandler) Available() bool { return h.hgPathErr == nil } +func (h *hgHandler) Close() error { + h.mu.Lock() + defer h.mu.Unlock() + + if h.cancel == nil { + return nil + } + + h.cancel() + for _, cmd := range h.cmds { + h.wg.Add(1) + go func() { + cmd.Wait() + h.wg.Done() + }() + } + h.wg.Wait() + h.url = nil + h.cmds = nil + h.ctx = nil + h.cancel = nil + return nil +} + func (h *hgHandler) Handler(dir string, env []string, logger *log.Logger) (http.Handler, error) { if !h.Available() { return nil, ServerNotInstalledError{name: "hg"} @@ -50,10 +81,25 @@ func (h *hgHandler) Handler(dir string, env []string, logger *log.Logger) (http. // if "hg" works at all then "hg serve" works too, and we'll execute that as // a subprocess, using a reverse proxy to forward the request and response. - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() + h.mu.Lock() - cmd := exec.CommandContext(ctx, h.hgPath, "serve", "--port", "0", "--address", "localhost", "--accesslog", os.DevNull, "--name", "vcweb", "--print-url") + if h.ctx == nil { + h.ctx, h.cancel = context.WithCancel(context.Background()) + } + + // Cache the hg server subprocess globally, because hg is too slow + // to start a new one for each request. There are under a dozen different + // repos we serve, so leaving a dozen processes around is not a big deal. 
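The hg.go hunk below implements the caching described in its comment: one long-lived 'hg serve' subprocess per repository directory, recorded in h.url and fronted by a reverse proxy. Reduced to its data structure, and with invented names, the pattern is roughly:

package proxysketch

import (
	"net/http"
	"net/http/httputil"
	"net/url"
	"sync"
)

type backendCache struct {
	mu   sync.Mutex
	urls map[string]*url.URL // repo dir -> URL served by its subprocess
}

// handlerFor starts the backend for dir at most once and proxies to it.
// start is expected to launch the subprocess and return its base URL.
func (c *backendCache) handlerFor(dir string, start func(string) (*url.URL, error)) (http.Handler, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	u, ok := c.urls[dir]
	if !ok {
		var err error
		if u, err = start(dir); err != nil {
			return nil, err
		}
		if c.urls == nil {
			c.urls = make(map[string]*url.URL)
		}
		c.urls[dir] = u
	}
	return httputil.NewSingleHostReverseProxy(u), nil
}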
+ u := h.url[dir] + if u != nil { + h.mu.Unlock() + logger.Printf("proxying hg request to %s", u) + httputil.NewSingleHostReverseProxy(u).ServeHTTP(w, req) + return + } + + logger.Printf("starting hg serve for %s", dir) + cmd := exec.CommandContext(h.ctx, h.hgPath, "serve", "--port", "0", "--address", "localhost", "--accesslog", os.DevNull, "--name", "vcweb", "--print-url") cmd.Dir = dir cmd.Env = append(slices.Clip(env), "PWD="+dir) @@ -74,47 +120,56 @@ func (h *hgHandler) Handler(dir string, env []string, logger *log.Logger) (http. stdout, err := cmd.StdoutPipe() if err != nil { + h.mu.Unlock() http.Error(w, err.Error(), http.StatusInternalServerError) return } if err := cmd.Start(); err != nil { + h.mu.Unlock() http.Error(w, err.Error(), http.StatusInternalServerError) return } - var wg sync.WaitGroup - defer func() { - cancel() - err := cmd.Wait() - if out := strings.TrimSuffix(stderr.String(), "interrupted!\n"); out != "" { - logger.Printf("%v: %v\n%s", cmd, err, out) - } else { - logger.Printf("%v", cmd) - } - wg.Wait() - }() r := bufio.NewReader(stdout) line, err := r.ReadString('\n') if err != nil { + h.mu.Unlock() + http.Error(w, err.Error(), http.StatusInternalServerError) return } // We have read what should be the server URL. 'hg serve' shouldn't need to // write anything else to stdout, but it's not a big deal if it does anyway. // Keep the stdout pipe open so that 'hg serve' won't get a SIGPIPE, but // actively discard its output so that it won't hang on a blocking write. - wg.Add(1) + h.wg.Add(1) go func() { io.Copy(io.Discard, r) - wg.Done() + h.wg.Done() }() - u, err := url.Parse(strings.TrimSpace(line)) + // On some systems, + // hg serve --address=localhost --print-url prints in-addr.arpa hostnames + // even though they cannot be looked up. + // Replace them with IP literals. 
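For concreteness, the effect of the two ReplaceAll calls in the next hunk on a line printed by 'hg serve' (the port here is invented):

package arpasketch

import "strings"

// fixLoopback shows what the in-addr.arpa rewrite accomplishes:
// "http://1.0.0.127.in-addr.arpa:40917/\n" becomes
// "http://127.0.0.1:40917/\n"; the ip6.arpa form maps to [::1] the same way.
func fixLoopback(line string) string {
	line = strings.ReplaceAll(line, "//1.0.0.127.in-addr.arpa", "//127.0.0.1")
	return line
}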
+ line = strings.ReplaceAll(line, "//1.0.0.127.in-addr.arpa", "//127.0.0.1") + line = strings.ReplaceAll(line, "//1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa", "//[::1]") + + u, err = url.Parse(strings.TrimSpace(line)) if err != nil { + h.mu.Unlock() logger.Printf("%v: %v", cmd, err) http.Error(w, err.Error(), http.StatusBadGateway) return } + + if h.url == nil { + h.url = make(map[string]*url.URL) + } + h.url[dir] = u + h.cmds = append(h.cmds, cmd) + h.mu.Unlock() + logger.Printf("proxying hg request to %s", u) httputil.NewSingleHostReverseProxy(u).ServeHTTP(w, req) }) diff --git a/src/cmd/go/internal/vcweb/script.go b/src/cmd/go/internal/vcweb/script.go index 3342ab200c6..0856c40677c 100644 --- a/src/cmd/go/internal/vcweb/script.go +++ b/src/cmd/go/internal/vcweb/script.go @@ -18,12 +18,14 @@ import ( "os" "os/exec" "path/filepath" + "regexp" "runtime" "strconv" "strings" "time" "golang.org/x/mod/module" + "golang.org/x/mod/semver" "golang.org/x/mod/zip" ) @@ -42,6 +44,7 @@ func newScriptEngine() *script.Engine { return script.OnceCondition(summary, func() (bool, error) { return f(), nil }) } add("bzr", lazyBool("the 'bzr' executable exists and provides the standard CLI", hasWorkingBzr)) + add("git-min-vers", script.PrefixCondition(" indicates a minimum git version", hasAtLeastGitVersion)) interrupt := func(cmd *exec.Cmd) error { return cmd.Process.Signal(os.Interrupt) } gracePeriod := 30 * time.Second // arbitrary @@ -394,3 +397,25 @@ func hasWorkingBzr() bool { err = exec.Command(bzr, "help").Run() return err == nil } + +var gitVersLineExtract = regexp.MustCompile(`git version\s+([\d.]+)`) + +func gitVersion() (string, error) { + gitOut, runErr := exec.Command("git", "version").CombinedOutput() + if runErr != nil { + return "v0", fmt.Errorf("failed to execute git version: %w", runErr) + } + matches := gitVersLineExtract.FindSubmatch(gitOut) + if len(matches) < 2 { + return "v0", fmt.Errorf("git version extraction regexp did not match version line: %q", gitOut) + } + return "v" + string(matches[1]), nil +} + +func hasAtLeastGitVersion(s *script.State, minVers string) (bool, error) { + gitVers, gitVersErr := gitVersion() + if gitVersErr != nil { + return false, gitVersErr + } + return semver.Compare(minVers, gitVers) <= 0, nil +} diff --git a/src/cmd/go/internal/vcweb/vcstest/vcstest.go b/src/cmd/go/internal/vcweb/vcstest/vcstest.go index 68278512922..224cfd79193 100644 --- a/src/cmd/go/internal/vcweb/vcstest/vcstest.go +++ b/src/cmd/go/internal/vcweb/vcstest/vcstest.go @@ -7,6 +7,7 @@ package vcstest import ( + "bytes" "cmd/go/internal/vcs" "cmd/go/internal/vcweb" "cmd/go/internal/web/intercept" @@ -70,7 +71,9 @@ func NewServer() (srv *Server, err error) { } }() - srvHTTP := httptest.NewServer(handler) + srvHTTP := httptest.NewUnstartedServer(handler) + srvHTTP.Config.ErrorLog = testLogger() + srvHTTP.Start() httpURL, err := url.Parse(srvHTTP.URL) if err != nil { return nil, err @@ -81,7 +84,9 @@ func NewServer() (srv *Server, err error) { } }() - srvHTTPS := httptest.NewTLSServer(handler) + srvHTTPS := httptest.NewUnstartedServer(handler) + srvHTTPS.Config.ErrorLog = testLogger() + srvHTTPS.StartTLS() httpsURL, err := url.Parse(srvHTTPS.URL) if err != nil { return nil, err @@ -115,6 +120,19 @@ func NewServer() (srv *Server, err error) { return srv, nil } +func testLogger() *log.Logger { + return log.New(httpLogger{}, "vcweb: ", 0) +} + +type httpLogger struct{} + +func (httpLogger) Write(b []byte) (int, error) { + if bytes.Contains(b, []byte("TLS handshake 
error")) { + return len(b), nil + } + return os.Stdout.Write(b) +} + func (srv *Server) Close() error { if vcs.VCSTestRepoURL != srv.HTTP.URL { panic("vcs URL hooks modified before Close") diff --git a/src/cmd/go/internal/vcweb/vcstest/vcstest_test.go b/src/cmd/go/internal/vcweb/vcstest/vcstest_test.go index 67234ac20d4..6a6a0eee57c 100644 --- a/src/cmd/go/internal/vcweb/vcstest/vcstest_test.go +++ b/src/cmd/go/internal/vcweb/vcstest/vcstest_test.go @@ -155,10 +155,10 @@ func TestScripts(t *testing.T) { t.Log(buf) } if err != nil { - if notInstalled := (vcweb.ServerNotInstalledError{}); errors.As(err, ¬Installed) || errors.Is(err, exec.ErrNotFound) { + if _, ok := errors.AsType[vcweb.ServerNotInstalledError](err); ok || errors.Is(err, exec.ErrNotFound) { t.Skip(err) } - if skip := (vcweb.SkipError{}); errors.As(err, &skip) { + if skip, ok := errors.AsType[vcweb.SkipError](err); ok { if skip.Msg == "" { t.Skip("SKIP") } else { diff --git a/src/cmd/go/internal/vcweb/vcweb.go b/src/cmd/go/internal/vcweb/vcweb.go index b81ff5e63de..98d39a3b1f2 100644 --- a/src/cmd/go/internal/vcweb/vcweb.go +++ b/src/cmd/go/internal/vcweb/vcweb.go @@ -199,8 +199,10 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { defer func() { if v := recover(); v != nil { - debug.PrintStack() - s.logger.Fatal(v) + if v == http.ErrAbortHandler { + panic(v) + } + s.logger.Fatalf("panic serving %s: %v\n%s", req.URL, v, debug.Stack()) } }() @@ -244,9 +246,9 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { }) if err != nil { s.logger.Print(err) - if notFound := (ScriptNotFoundError{}); errors.As(err, ¬Found) { + if _, ok := errors.AsType[ScriptNotFoundError](err); ok { http.NotFound(w, req) - } else if notInstalled := (ServerNotInstalledError{}); errors.As(err, ¬Installed) || errors.Is(err, exec.ErrNotFound) { + } else if _, ok := errors.AsType[ServerNotInstalledError](err); ok || errors.Is(err, exec.ErrNotFound) { http.Error(w, err.Error(), http.StatusNotImplemented) } else { http.Error(w, err.Error(), http.StatusInternalServerError) diff --git a/src/cmd/go/internal/version/version.go b/src/cmd/go/internal/version/version.go index c26dd42b4e1..781bc080e89 100644 --- a/src/cmd/go/internal/version/version.go +++ b/src/cmd/go/internal/version/version.go @@ -168,7 +168,7 @@ func scanFile(file string, info fs.FileInfo, mustPrint bool) bool { bi, err := buildinfo.ReadFile(file) if err != nil { if mustPrint { - if pathErr := (*os.PathError)(nil); errors.As(err, &pathErr) && filepath.Clean(pathErr.Path) == filepath.Clean(file) { + if pathErr, ok := errors.AsType[*os.PathError](err); ok && filepath.Clean(pathErr.Path) == filepath.Clean(file) { fmt.Fprintf(os.Stderr, "%v\n", file) } else { // Skip errors for non-Go binaries. diff --git a/src/cmd/go/internal/vet/vet.go b/src/cmd/go/internal/vet/vet.go index 3514be80feb..9055446325a 100644 --- a/src/cmd/go/internal/vet/vet.go +++ b/src/cmd/go/internal/vet/vet.go @@ -2,13 +2,20 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package vet implements the “go vet” command. +// Package vet implements the “go vet” and “go fix” commands. package vet import ( "context" + "encoding/json" + "errors" "fmt" - "path/filepath" + "io" + "os" + "slices" + "strconv" + "strings" + "sync" "cmd/go/internal/base" "cmd/go/internal/cfg" @@ -18,30 +25,41 @@ import ( "cmd/go/internal/work" ) -// Break init loop. 
-func init() { - CmdVet.Run = runVet -} - var CmdVet = &base.Command{ CustomFlags: true, UsageLine: "go vet [build flags] [-vettool prog] [vet flags] [packages]", Short: "report likely mistakes in packages", Long: ` -Vet runs the Go vet command on the packages named by the import paths. +Vet runs the Go vet tool (cmd/vet) on the named packages +and reports diagnostics. -For more about vet and its flags, see 'go doc cmd/vet'. -For more about specifying packages, see 'go help packages'. -For a list of checkers and their flags, see 'go tool vet help'. -For details of a specific checker such as 'printf', see 'go tool vet help printf'. +It supports these flags: -The -vettool=prog flag selects a different analysis tool with alternative -or additional checks. -For example, the 'shadow' analyzer can be built and run using these commands: + -c int + display offending line with this many lines of context (default -1) + -json + emit JSON output + -fix + instead of printing each diagnostic, apply its first fix (if any) + -diff + instead of applying each fix, print the patch as a unified diff + +The -vettool=prog flag selects a different analysis tool with +alternative or additional checks. For example, the 'shadow' analyzer +can be built and run using these commands: go install golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow@latest go vet -vettool=$(which shadow) +Alternative vet tools should be built atop golang.org/x/tools/go/analysis/unitchecker, +which handles the interaction with go vet. + +The default vet tool is 'go tool vet' or cmd/vet. +For help on its checkers and their flags, run 'go tool vet help'. +For details of a specific checker such as 'printf', see 'go tool vet help printf'. + +For more about specifying packages, see 'go help packages'. + The build flags supported by go vet are those that control package resolution and execution, such as -C, -n, -x, -v, -tags, and -toolexec. For more about these flags, see 'go help build'. @@ -50,9 +68,65 @@ See also: go fmt, go fix. `, } -func runVet(ctx context.Context, cmd *base.Command, args []string) { - vetFlags, pkgArgs := vetFlags(args) - modload.InitWorkfile() // The vet command does custom flag processing; initialize workspaces after that. +var CmdFix = &base.Command{ + CustomFlags: true, + UsageLine: "go fix [build flags] [-fixtool prog] [fix flags] [packages]", + Short: "apply fixes suggested by static checkers", + Long: ` +Fix runs the Go fix tool (cmd/fix) on the named packages +and applies suggested fixes. + +It supports these flags: + + -diff + instead of applying each fix, print the patch as a unified diff + +The -fixtool=prog flag selects a different analysis tool with +alternative or additional fixers; see the documentation for go vet's +-vettool flag for details. + +The default fix tool is 'go tool fix' or cmd/fix. +For help on its fixers and their flags, run 'go tool fix help'. +For details of a specific fixer such as 'hostport', see 'go tool fix help hostport'. + +For more about specifying packages, see 'go help packages'. + +The build flags supported by go fix are those that control package resolution +and execution, such as -C, -n, -x, -v, -tags, and -toolexec. +For more about these flags, see 'go help build'. + +See also: go fmt, go vet. + `, +} + +func init() { + // avoid initialization cycle + CmdVet.Run = run + CmdFix.Run = run + + addFlags(CmdVet) + addFlags(CmdFix) +} + +var ( + // "go vet -fix" causes fixes to be applied. 
+	vetFixFlag = CmdVet.Flag.Bool("fix", false, "apply the first fix (if any) for each diagnostic")
+
+	// The "go fix -fix=name,..." flag is an obsolete flag formerly
+	// used to pass a list of names to the old "cmd/fix -r".
+	fixFixFlag = CmdFix.Flag.String("fix", "", "obsolete; no effect")
+)
+
+// run implements both "go vet" and "go fix".
+func run(ctx context.Context, cmd *base.Command, args []string) {
+	moduleLoaderState := modload.NewState()
+	// Compute flags for the vet/fix tool (e.g. cmd/{vet,fix}).
+	toolFlags, pkgArgs := toolFlags(cmd, args)
+
+	// The vet/fix commands do custom flag processing;
+	// initialize workspaces after that.
+	moduleLoaderState.InitWorkfile()
 
 	if cfg.DebugTrace != "" {
 		var close func() error
@@ -71,50 +145,292 @@ func runVet(ctx context.Context, cmd *base.Command, args []string) {
 	ctx, span := trace.StartSpan(ctx, fmt.Sprint("Running ", cmd.Name(), " command"))
 	defer span.Done()
 
-	work.BuildInit()
-	work.VetFlags = vetFlags
-	if len(vetFlags) > 0 {
-		work.VetExplicit = true
-	}
-	if vetTool != "" {
-		var err error
-		work.VetTool, err = filepath.Abs(vetTool)
-		if err != nil {
-			base.Fatalf("%v", err)
+	work.BuildInit(moduleLoaderState)
+
+	// Flag theory:
+	//
+	// All flags supported by unitchecker are accepted by go {vet,fix}.
+	// Some arise from each analyzer in the tool (both to enable it
+	// and to configure it), whereas others [-V -c -diff -fix -flags -json]
+	// are core to unitchecker itself.
+	//
+	// Most are passed through to toolFlags, but not all:
+	// * -V and -flags are used by the handshake in the [toolFlags] function;
+	// * these old flags have no effect: [-all -source -tags -v]; and
+	// * the [-c -fix -diff -json] flags are handled specially
+	//   as described below:
+	//
+	// command args              tool args
+	// go vet                 => cmd/vet -json        Parse stdout, print diagnostics to stderr.
+	// go vet -json           => cmd/vet -json        Pass stdout through.
+	// go vet -fix [-diff]    => cmd/vet -fix [-diff] Pass stdout through.
+	// go fix [-diff]         => cmd/fix -fix [-diff] Pass stdout through.
+	// go fix -json           => cmd/fix -json        Pass stdout through.
+	//
+	// Notes:
+	// * -diff requires "go vet -fix" or "go fix", and no -json.
+	// * -json output is the same in "vet" and "fix" modes,
+	//   and describes both diagnostics and fixes (but does not apply them).
+	// * -c=n is supported by the unitchecker, but we reimplement it
+	//   here (see printJSONDiagnostics), and do not pass the flag through.
+
+	work.VetExplicit = len(toolFlags) > 0
+
+	if cmd.Name() == "fix" || *vetFixFlag {
+		// fix mode: 'go fix' or 'go vet -fix'
+		if jsonFlag {
+			if diffFlag {
+				base.Fatalf("-json and -diff cannot be used together")
+			}
+		} else {
+			toolFlags = append(toolFlags, "-fix")
+			if diffFlag {
+				toolFlags = append(toolFlags, "-diff")
+			}
+		}
+		if contextFlag != -1 {
+			base.Fatalf("-c flag cannot be used when applying fixes")
+		}
+	} else {
+		// vet mode: 'go vet' without -fix
+		if !jsonFlag {
+			// Post-process the JSON diagnostics on stdout and format
+			// them as "file:line: message" diagnostics on stderr.
+			// (JSON reliably frames diagnostics, fixes, and errors so
+			// that we don't have to parse stderr or interpret non-zero
+			// exit codes, and interacts better with the action cache.)
+			toolFlags = append(toolFlags, "-json")
+			work.VetHandleStdout = printJSONDiagnostics
+		}
+		if diffFlag {
+			base.Fatalf("go vet -diff flag requires -fix")
 		}
 	}
 
-	pkgOpts := load.PackageOpts{ModResolveTests: true}
-	pkgs := load.PackagesAndErrors(ctx, pkgOpts, pkgArgs)
-	load.CheckPackageErrors(pkgs)
-	if len(pkgs) == 0 {
-		base.Fatalf("no packages to vet")
+	// Implement legacy "go fix -fix=name,..." flag.
+	if *fixFixFlag != "" {
+		fmt.Fprintf(os.Stderr, "go %s: the -fix=%s flag is obsolete and has no effect\n", cmd.Name(), *fixFixFlag)
+
+		// The buildtag fixer is now implemented by cmd/fix.
+		if slices.Contains(strings.Split(*fixFixFlag, ","), "buildtag") {
+			fmt.Fprintf(os.Stderr, "go %s: to enable the buildtag check, use -buildtag\n", cmd.Name())
+		}
 	}
 
-	b := work.NewBuilder("")
+	work.VetFlags = toolFlags
+
+	pkgOpts := load.PackageOpts{ModResolveTests: true}
+	pkgs := load.PackagesAndErrors(moduleLoaderState, ctx, pkgOpts, pkgArgs)
+	load.CheckPackageErrors(pkgs)
+	if len(pkgs) == 0 {
+		base.Fatalf("no packages to %s", cmd.Name())
+	}
+
+	b := work.NewBuilder("", moduleLoaderState.VendorDirOrEmpty)
 	defer func() {
 		if err := b.Close(); err != nil {
 			base.Fatal(err)
 		}
 	}()
 
-	root := &work.Action{Mode: "go vet"}
+	// To avoid file corruption from duplicate application of
+	// fixes (in fix mode), and duplicate reporting of diagnostics
+	// (in vet mode), we must run the tool only once for each
+	// source file. We achieve that by running on ptest (below)
+	// instead of p.
+	//
+	// As a side benefit, this also allows analyzers to make
+	// "closed world" assumptions and report diagnostics (such as
+	// "this symbol is unused") that might be false if computed
+	// from just the primary package p, falsified by the
+	// additional declarations in test files.
+	//
+	// We needn't worry about intermediate test variants, as they
+	// will only be executed in VetxOnly mode, for facts but not
+	// diagnostics.
+
+	root := &work.Action{Mode: "go " + cmd.Name()}
 	for _, p := range pkgs {
-		_, ptest, pxtest, perr := load.TestPackagesFor(ctx, pkgOpts, p, nil)
+		_, ptest, pxtest, perr := load.TestPackagesFor(moduleLoaderState, ctx, pkgOpts, p, nil)
 		if perr != nil {
 			base.Errorf("%v", perr.Error)
 			continue
 		}
 		if len(ptest.GoFiles) == 0 && len(ptest.CgoFiles) == 0 && pxtest == nil {
-			base.Errorf("go: can't vet %s: no Go files in %s", p.ImportPath, p.Dir)
+			base.Errorf("go: can't %s %s: no Go files in %s", cmd.Name(), p.ImportPath, p.Dir)
			continue
 		}
 		if len(ptest.GoFiles) > 0 || len(ptest.CgoFiles) > 0 {
-			root.Deps = append(root.Deps, b.VetAction(work.ModeBuild, work.ModeBuild, ptest))
+			// The test package includes all the files of the primary package.
+			root.Deps = append(root.Deps, b.VetAction(moduleLoaderState, work.ModeBuild, work.ModeBuild, ptest))
 		}
 		if pxtest != nil {
-			root.Deps = append(root.Deps, b.VetAction(work.ModeBuild, work.ModeBuild, pxtest))
+			root.Deps = append(root.Deps, b.VetAction(moduleLoaderState, work.ModeBuild, work.ModeBuild, pxtest))
 		}
 	}
 	b.Do(ctx, root)
 }
+
+// printJSONDiagnostics parses JSON (from the tool's stdout) and
+// prints it (to stderr) in "file:line: message" form.
+// It also ensures that we exit nonzero if there were diagnostics.
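A worked example of the payload that printJSONDiagnostics (below) consumes. The package path, analyzer name, and message are invented, but the shape, a package ID mapping to an analyzer name mapping to either a diagnostic list or an error object, matches the jsonTree type defined at the end of this file:

package jsonsketch

import "encoding/json"

const sample = `{
	"example.com/p": {
		"printf": [
			{
				"posn": "/home/user/p/p.go:7:2",
				"end": "/home/user/p/p.go:7:31",
				"message": "fmt.Printf format %d has arg s of wrong type string"
			}
		]
	}
}`

// decode mirrors the two-stage parse below: the outer maps first, then
// each raw message as []jsonDiagnostic if it starts with '[', else as
// an {"error": ...} object.
func decode() (map[string]map[string]json.RawMessage, error) {
	var tree map[string]map[string]json.RawMessage
	err := json.Unmarshal([]byte(sample), &tree)
	return tree, err
}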
+func printJSONDiagnostics(r io.Reader) error {
+	stdout, err := io.ReadAll(r)
+	if err != nil {
+		return err
+	}
+	if len(stdout) > 0 {
+		// unitchecker emits a JSON map of the form
+		// Package ID -> Analyzer.Name -> (error | []Diagnostic).
+		var tree jsonTree
+		if err := json.Unmarshal(stdout, &tree); err != nil {
+			return fmt.Errorf("parsing JSON: %v", err)
+		}
+		for _, units := range tree {
+			for analyzer, msg := range units {
+				if msg[0] == '[' {
+					// []Diagnostic
+					var diags []jsonDiagnostic
+					if err := json.Unmarshal([]byte(msg), &diags); err != nil {
+						return fmt.Errorf("parsing JSON diagnostics: %v", err)
+					}
+					for _, diag := range diags {
+						base.SetExitStatus(1)
+						printJSONDiagnostic(analyzer, diag)
+					}
+				} else {
+					// error
+					var e jsonError
+					if err := json.Unmarshal([]byte(msg), &e); err != nil {
+						return fmt.Errorf("parsing JSON error: %v", err)
+					}
+
+					base.SetExitStatus(1)
+					return errors.New(e.Err)
+				}
+			}
+		}
+	}
+	return nil
+}
+
+var stderrMu sync.Mutex // serializes concurrent writes to stderr
+
+func printJSONDiagnostic(analyzer string, diag jsonDiagnostic) {
+	stderrMu.Lock()
+	defer stderrMu.Unlock()
+
+	type posn struct {
+		file      string
+		line, col int
+	}
+	parsePosn := func(s string) (_ posn, _ bool) {
+		colon2 := strings.LastIndexByte(s, ':')
+		if colon2 < 0 {
+			return
+		}
+		colon1 := strings.LastIndexByte(s[:colon2], ':')
+		if colon1 < 0 {
+			return
+		}
+		line, err := strconv.Atoi(s[colon1+len(":") : colon2])
+		if err != nil {
+			return
+		}
+		col, err := strconv.Atoi(s[colon2+len(":"):])
+		if err != nil {
+			return
+		}
+		return posn{s[:colon1], line, col}, true
+	}
+
+	print := func(start, end, message string) {
+		if posn, ok := parsePosn(start); ok {
+			// The (*work.Shell).reportCmd method relativizes the
+			// prefix of each line of the subprocess's stdout;
+			// but filenames in JSON aren't at the start of the line,
+			// so we need to apply ShortPath here too.
+			fmt.Fprintf(os.Stderr, "%s:%d:%d: %v\n", base.ShortPath(posn.file), posn.line, posn.col, message)
+		} else {
+			fmt.Fprintf(os.Stderr, "%s: %v\n", start, message)
+		}
+
+		// -c=n: show offending line plus N lines of context.
+		// (Duplicates logic in unitchecker; see analysisflags.PrintPlain.)
+		if contextFlag >= 0 {
+			if end == "" {
+				end = start
+			}
+			var (
+				startPosn, ok1 = parsePosn(start)
+				endPosn, ok2   = parsePosn(end)
+			)
+			if ok1 && ok2 {
+				// TODO(adonovan): respect overlays (like unitchecker does).
+				data, _ := os.ReadFile(startPosn.file)
+				lines := strings.Split(string(data), "\n")
+				for i := startPosn.line - contextFlag; i <= endPosn.line+contextFlag; i++ {
+					if 1 <= i && i <= len(lines) {
+						fmt.Fprintf(os.Stderr, "%d\t%s\n", i, lines[i-1])
+					}
+				}
+			}
+		}
+	}
+
+	// TODO(adonovan): append " [analyzer]" to message. But we must first relax
+	// x/tools/go/analysis/internal/versiontest.TestVettool and revendor; sigh.
+	_ = analyzer
+	print(diag.Posn, diag.End, diag.Message)
+	for _, rel := range diag.Related {
+		print(rel.Posn, rel.End, "\t"+rel.Message)
+	}
+}
+
+// -- JSON schema --
+
+// (populated by golang.org/x/tools/go/analysis/internal/analysisflags/flags.go)
+
+// A jsonTree is a mapping from package ID to analysis name to result.
+// Each result is either a jsonError or a list of jsonDiagnostic.
+type jsonTree map[string]map[string]json.RawMessage
+
+type jsonError struct {
+	Err string `json:"error"`
+}
+
+// A jsonTextEdit describes the replacement of a portion of a file.
+// Start and End are zero-based half-open indices into the original byte
+// sequence of the file, and New is the new text.
+type jsonTextEdit struct {
+	Filename string `json:"filename"`
+	Start    int    `json:"start"`
+	End      int    `json:"end"`
+	New      string `json:"new"`
+}
+
+// A jsonSuggestedFix describes an edit that should be applied as a whole or not
+// at all. It might contain multiple TextEdits/text_edits if the SuggestedFix
+// consists of multiple non-contiguous edits.
+type jsonSuggestedFix struct {
+	Message string         `json:"message"`
+	Edits   []jsonTextEdit `json:"edits"`
+}
+
+// A jsonDiagnostic describes the json schema of an analysis.Diagnostic.
+type jsonDiagnostic struct {
+	Category       string                    `json:"category,omitempty"`
+	Posn           string                    `json:"posn"` // e.g. "file.go:line:column"
+	End            string                    `json:"end"`
+	Message        string                    `json:"message"`
+	SuggestedFixes []jsonSuggestedFix        `json:"suggested_fixes,omitempty"`
+	Related        []jsonRelatedInformation  `json:"related,omitempty"`
+}
+
+// A jsonRelatedInformation describes a secondary position and message related to
+// a primary diagnostic.
+type jsonRelatedInformation struct {
+	Posn    string `json:"posn"` // e.g. "file.go:line:column"
+	End     string `json:"end"`
+	Message string `json:"message"`
+}
diff --git a/src/cmd/go/internal/vet/vetflag.go b/src/cmd/go/internal/vet/vetflag.go
index d0bdb58a504..7342b99d6e3 100644
--- a/src/cmd/go/internal/vet/vetflag.go
+++ b/src/cmd/go/internal/vet/vetflag.go
@@ -21,70 +21,83 @@ import (
 	"cmd/go/internal/work"
 )
 
-// go vet flag processing
-//
-// We query the flags of the tool specified by -vettool and accept any
-// of those flags plus any flag valid for 'go build'. The tool must
-// support -flags, which prints a description of its flags in JSON to
-// stdout.
+// go vet/fix flag processing
+var (
+	// We query the flags of the tool specified by -{vet,fix}tool
+	// and accept any of those flags plus any flag valid for 'go
+	// build'. The tool must support -flags, which prints a
+	// description of its flags in JSON to stdout.
 
-// vetTool specifies the vet command to run.
-// Any tool that supports the (still unpublished) vet
-// command-line protocol may be supplied; see
-// golang.org/x/tools/go/analysis/unitchecker for one
-// implementation. It is also used by tests.
-//
-// The default behavior (vetTool=="") runs 'go tool vet'.
-var vetTool string // -vettool
+	// toolFlag specifies the vet/fix command to run.
+	// Any tool that supports the (unpublished) vet
+	// command-line protocol may be supplied; see
+	// golang.org/x/tools/go/analysis/unitchecker for the
+	// sole implementation. It is also used by tests.
+	//
+	// The default behavior ("") runs 'go tool {vet,fix}'.
+	//
+	// Do not access this flag directly; use [parseToolFlag].
+	toolFlag    string // -{vet,fix}tool
+	diffFlag    bool   // -diff
+	jsonFlag    bool   // -json
+	contextFlag = -1   // -c=n
+)
 
-func init() {
-	// For now, we omit the -json flag for vet because we could plausibly
-	// support -json specific to the vet command in the future (perhaps using
-	// the same format as build -json).
-	work.AddBuildFlags(CmdVet, work.OmitJSONFlag)
-	CmdVet.Flag.StringVar(&vetTool, "vettool", "", "")
+func addFlags(cmd *base.Command) {
+	// We run the compiler for export data.
+	// Suppress the build -json flag; we define our own.
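addFlags and toolFlags (continued below) implement the tool handshake: the go command runs the tool with -flags and decodes a JSON description of the tool's flag set. The field set in this sketch is inferred from how analysisFlags is consumed in this file (only Name and Bool are consulted); the authoritative schema lives in x/tools' analysisflags package, so treat this as illustrative:

package handshakesketch

import (
	"encoding/json"
	"os/exec"
)

// toolFlagDef is a guess at the per-flag record; only Name and Bool
// are relied upon by the flag-merging code below.
type toolFlagDef struct {
	Name  string `json:"name"`
	Bool  bool   `json:"bool"`
	Usage string `json:"usage"`
}

func queryToolFlags(tool string) ([]toolFlagDef, error) {
	out, err := exec.Command(tool, "-flags").Output()
	if err != nil {
		return nil, err
	}
	var defs []toolFlagDef
	if err := json.Unmarshal(out, &defs); err != nil {
		return nil, err
	}
	return defs, nil
}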
+ work.AddBuildFlags(cmd, work.OmitJSONFlag) + + cmd.Flag.StringVar(&toolFlag, cmd.Name()+"tool", "", "") // -vettool or -fixtool + cmd.Flag.BoolVar(&diffFlag, "diff", false, "print diff instead of applying it") + cmd.Flag.BoolVar(&jsonFlag, "json", false, "print diagnostics and fixes as JSON") + cmd.Flag.IntVar(&contextFlag, "c", -1, "display offending line with this many lines of context") } -func parseVettoolFlag(args []string) { - // Extract -vettool by ad hoc flag processing: +// parseToolFlag scans args for -{vet,fix}tool and returns the effective tool filename. +func parseToolFlag(cmd *base.Command, args []string) string { + toolFlagName := cmd.Name() + "tool" // vettool or fixtool + + // Extract -{vet,fix}tool by ad hoc flag processing: // its value is needed even before we can declare // the flags available during main flag processing. for i, arg := range args { - if arg == "-vettool" || arg == "--vettool" { + if arg == "-"+toolFlagName || arg == "--"+toolFlagName { if i+1 >= len(args) { log.Fatalf("%s requires a filename", arg) } - vetTool = args[i+1] - return - } else if strings.HasPrefix(arg, "-vettool=") || - strings.HasPrefix(arg, "--vettool=") { - vetTool = arg[strings.IndexByte(arg, '=')+1:] - return + toolFlag = args[i+1] + break + } else if strings.HasPrefix(arg, "-"+toolFlagName+"=") || + strings.HasPrefix(arg, "--"+toolFlagName+"=") { + toolFlag = arg[strings.IndexByte(arg, '=')+1:] + break } } -} -// vetFlags processes the command line, splitting it at the first non-flag -// into the list of flags and list of packages. -func vetFlags(args []string) (passToVet, packageNames []string) { - parseVettoolFlag(args) - - // Query the vet command for its flags. - var tool string - if vetTool == "" { - tool = base.Tool("vet") - } else { - var err error - tool, err = filepath.Abs(vetTool) + if toolFlag != "" { + tool, err := filepath.Abs(toolFlag) if err != nil { log.Fatal(err) } + return tool } + + return base.Tool(cmd.Name()) // default to 'go tool vet|fix' +} + +// toolFlags processes the command line, splitting it at the first non-flag +// into the list of flags and list of packages. +func toolFlags(cmd *base.Command, args []string) (passToTool, packageNames []string) { + tool := parseToolFlag(cmd, args) + work.VetTool = tool + + // Query the tool for its flags. out := new(bytes.Buffer) - vetcmd := exec.Command(tool, "-flags") - vetcmd.Stdout = out - if err := vetcmd.Run(); err != nil { - fmt.Fprintf(os.Stderr, "go: can't execute %s -flags: %v\n", tool, err) + toolcmd := exec.Command(tool, "-flags") + toolcmd.Stdout = out + if err := toolcmd.Run(); err != nil { + fmt.Fprintf(os.Stderr, "go: %s -flags failed: %v\n", tool, err) base.SetExitStatus(2) base.Exit() } @@ -99,15 +112,20 @@ func vetFlags(args []string) (passToVet, packageNames []string) { base.Exit() } - // Add vet's flags to CmdVet.Flag. + // Add tool's flags to cmd.Flag. // - // Some flags, in particular -tags and -v, are known to vet but + // Some flags, in particular -tags and -v, are known to the tool but // also defined as build flags. This works fine, so we omit duplicates here. - // However some, like -x, are known to the build but not to vet. - isVetFlag := make(map[string]bool, len(analysisFlags)) - cf := CmdVet.Flag + // However some, like -x, are known to the build but not to the tool. + isToolFlag := make(map[string]bool, len(analysisFlags)) + cf := cmd.Flag for _, f := range analysisFlags { - isVetFlag[f.Name] = true + // We reimplement the unitchecker's -c=n flag. 
+ // Don't allow it to be passed through. + if f.Name == "c" { + continue + } + isToolFlag[f.Name] = true if cf.Lookup(f.Name) == nil { if f.Bool { cf.Bool(f.Name, false, "") @@ -117,22 +135,22 @@ func vetFlags(args []string) (passToVet, packageNames []string) { } } - // Record the set of vet tool flags set by GOFLAGS. We want to pass them to - // the vet tool, but only if they aren't overridden by an explicit argument. - base.SetFromGOFLAGS(&CmdVet.Flag) + // Record the set of tool flags set by GOFLAGS. We want to pass them to + // the tool, but only if they aren't overridden by an explicit argument. + base.SetFromGOFLAGS(&cmd.Flag) addFromGOFLAGS := map[string]bool{} - CmdVet.Flag.Visit(func(f *flag.Flag) { - if isVetFlag[f.Name] { + cmd.Flag.Visit(func(f *flag.Flag) { + if isToolFlag[f.Name] { addFromGOFLAGS[f.Name] = true } }) explicitFlags := make([]string, 0, len(args)) for len(args) > 0 { - f, remainingArgs, err := cmdflag.ParseOne(&CmdVet.Flag, args) + f, remainingArgs, err := cmdflag.ParseOne(&cmd.Flag, args) if errors.Is(err, flag.ErrHelp) { - exitWithUsage() + exitWithUsage(cmd) } if errors.Is(err, cmdflag.ErrFlagTerminator) { @@ -142,7 +160,7 @@ func vetFlags(args []string) (passToVet, packageNames []string) { break } - if nf := (cmdflag.NonFlagError{}); errors.As(err, &nf) { + if _, ok := errors.AsType[cmdflag.NonFlagError](err); ok { // Everything from here on out — including the argument we just consumed — // must be a package name. packageNames = args @@ -151,12 +169,12 @@ func vetFlags(args []string) (passToVet, packageNames []string) { if err != nil { fmt.Fprintln(os.Stderr, err) - exitWithUsage() + exitWithUsage(cmd) } - if isVetFlag[f.Name] { + if isToolFlag[f.Name] { // Forward the raw arguments rather than cleaned equivalents, just in - // case the vet tool parses them idiosyncratically. + // case the tool parses them idiosyncratically. explicitFlags = append(explicitFlags, args[:len(args)-len(remainingArgs)]...) // This flag has been overridden explicitly, so don't forward its implicit @@ -168,26 +186,26 @@ func vetFlags(args []string) (passToVet, packageNames []string) { } // Prepend arguments from GOFLAGS before other arguments. - CmdVet.Flag.Visit(func(f *flag.Flag) { + cmd.Flag.Visit(func(f *flag.Flag) { if addFromGOFLAGS[f.Name] { - passToVet = append(passToVet, fmt.Sprintf("-%s=%s", f.Name, f.Value)) + passToTool = append(passToTool, fmt.Sprintf("-%s=%s", f.Name, f.Value)) } }) - passToVet = append(passToVet, explicitFlags...) - return passToVet, packageNames + passToTool = append(passToTool, explicitFlags...) 
+ return passToTool, packageNames } -func exitWithUsage() { - fmt.Fprintf(os.Stderr, "usage: %s\n", CmdVet.UsageLine) - fmt.Fprintf(os.Stderr, "Run 'go help %s' for details.\n", CmdVet.LongName()) +func exitWithUsage(cmd *base.Command) { + fmt.Fprintf(os.Stderr, "usage: %s\n", cmd.UsageLine) + fmt.Fprintf(os.Stderr, "Run 'go help %s' for details.\n", cmd.LongName()) // This part is additional to what (*Command).Usage does: - cmd := "go tool vet" - if vetTool != "" { - cmd = vetTool + tool := toolFlag + if tool == "" { + tool = "go tool " + cmd.Name() } - fmt.Fprintf(os.Stderr, "Run '%s help' for a full list of flags and analyzers.\n", cmd) - fmt.Fprintf(os.Stderr, "Run '%s -help' for an overview.\n", cmd) + fmt.Fprintf(os.Stderr, "Run '%s help' for a full list of flags and analyzers.\n", tool) + fmt.Fprintf(os.Stderr, "Run '%s -help' for an overview.\n", tool) base.SetExitStatus(2) base.Exit() diff --git a/src/cmd/go/internal/work/action.go b/src/cmd/go/internal/work/action.go index 3636f642e26..698a523c251 100644 --- a/src/cmd/go/internal/work/action.go +++ b/src/cmd/go/internal/work/action.go @@ -28,6 +28,7 @@ import ( "cmd/go/internal/cache" "cmd/go/internal/cfg" "cmd/go/internal/load" + "cmd/go/internal/modload" "cmd/go/internal/str" "cmd/go/internal/trace" "cmd/internal/buildid" @@ -39,6 +40,7 @@ import ( // build packages in parallel, and the builder is shared. type Builder struct { WorkDir string // the temporary work directory (ends in filepath.Separator) + getVendorDir func() string // TODO(jitsu): remove this after we eliminate global module state actionCache map[cacheKey]*Action // a cache of already-constructed actions flagCache map[[2]string]bool // a cache of supported compiler flags gccCompilerIDCache map[string]cache.ActionID // cache for gccCompilerID @@ -274,8 +276,9 @@ const ( // and arranges for it to be removed in case of an unclean exit. // The caller must Close the builder explicitly to clean up the WorkDir // before a clean exit. -func NewBuilder(workDir string) *Builder { +func NewBuilder(workDir string, getVendorDir func() string) *Builder { b := new(Builder) + b.getVendorDir = getVendorDir b.actionCache = make(map[cacheKey]*Action) b.gccToolIDCache = make(map[string]string) @@ -392,7 +395,7 @@ func (b *Builder) NewObjdir() string { // at shlibpath. For the native toolchain this list is stored, newline separated, in // an ELF note with name "Go\x00\x00" and type 1. For GCCGO it is extracted from the // .go_export section. 
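readpkglist's doc comment above names two encodings. A condensed, self-contained sketch of the gccgo branch that follows, reading the package list out of a shared library's .go_export section; the "pkgpath " prefix mirrors what the real loop strips, and error handling is simplified:

package shlibsketch

import (
	"bytes"
	"debug/elf"
	"fmt"
)

func readGccgoPkgList(shlibpath string) ([]string, error) {
	f, err := elf.Open(shlibpath)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	sect := f.Section(".go_export")
	if sect == nil {
		return nil, fmt.Errorf("%s: no .go_export section", shlibpath)
	}
	data, err := sect.Data()
	if err != nil {
		return nil, err
	}
	var pkgs []string
	for _, line := range bytes.Split(data, []byte{'\n'}) {
		// Lines of interest look like "pkgpath <import/path>;".
		if path, found := bytes.CutPrefix(line, []byte("pkgpath ")); found {
			pkgs = append(pkgs, string(bytes.TrimSuffix(path, []byte{';'})))
		}
	}
	return pkgs, nil
}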
-func readpkglist(shlibpath string) (pkgs []*load.Package) { +func readpkglist(s *modload.State, shlibpath string) (pkgs []*load.Package) { var stk load.ImportStack if cfg.BuildToolchainName == "gccgo" { f, err := elf.Open(shlibpath) @@ -412,7 +415,7 @@ func readpkglist(shlibpath string) (pkgs []*load.Package) { for _, line := range bytes.Split(data, []byte{'\n'}) { if path, found := bytes.CutPrefix(line, pkgpath); found { path = bytes.TrimSuffix(path, []byte{';'}) - pkgs = append(pkgs, load.LoadPackageWithFlags(string(path), base.Cwd(), &stk, nil, 0)) + pkgs = append(pkgs, load.LoadPackageWithFlags(s, string(path), base.Cwd(), &stk, nil, 0)) } } } else { @@ -423,7 +426,7 @@ func readpkglist(shlibpath string) (pkgs []*load.Package) { scanner := bufio.NewScanner(bytes.NewBuffer(pkglistbytes)) for scanner.Scan() { t := scanner.Text() - pkgs = append(pkgs, load.LoadPackageWithFlags(t, base.Cwd(), &stk, nil, 0)) + pkgs = append(pkgs, load.LoadPackageWithFlags(s, t, base.Cwd(), &stk, nil, 0)) } } return @@ -443,9 +446,9 @@ func (b *Builder) cacheAction(mode string, p *load.Package, f func() *Action) *A } // AutoAction returns the "right" action for go build or go install of p. -func (b *Builder) AutoAction(mode, depMode BuildMode, p *load.Package) *Action { +func (b *Builder) AutoAction(s *modload.State, mode, depMode BuildMode, p *load.Package) *Action { if p.Name == "main" { - return b.LinkAction(mode, depMode, p) + return b.LinkAction(s, mode, depMode, p) } return b.CompileAction(mode, depMode, p) } @@ -866,13 +869,13 @@ func (b *Builder) cgoAction(p *load.Package, objdir string, deps []*Action, hasC // It depends on the action for compiling p. // If the caller may be causing p to be installed, it is up to the caller // to make sure that the install depends on (runs after) vet. -func (b *Builder) VetAction(mode, depMode BuildMode, p *load.Package) *Action { - a := b.vetAction(mode, depMode, p) +func (b *Builder) VetAction(s *modload.State, mode, depMode BuildMode, p *load.Package) *Action { + a := b.vetAction(s, mode, depMode, p) a.VetxOnly = false return a } -func (b *Builder) vetAction(mode, depMode BuildMode, p *load.Package) *Action { +func (b *Builder) vetAction(s *modload.State, mode, depMode BuildMode, p *load.Package) *Action { // Construct vet action. a := b.cacheAction("vet", p, func() *Action { a1 := b.CompileAction(mode|ModeVetOnly, depMode, p) @@ -888,7 +891,7 @@ func (b *Builder) vetAction(mode, depMode BuildMode, p *load.Package) *Action { deps = []*Action{a1} } for _, p1 := range p.Internal.Imports { - deps = append(deps, b.vetAction(mode, depMode, p1)) + deps = append(deps, b.vetAction(s, mode, depMode, p1)) } a := &Action{ @@ -913,7 +916,7 @@ func (b *Builder) vetAction(mode, depMode BuildMode, p *load.Package) *Action { // LinkAction returns the action for linking p into an executable // and possibly installing the result (according to mode). // depMode is the action (build or install) to use when compiling dependencies. -func (b *Builder) LinkAction(mode, depMode BuildMode, p *load.Package) *Action { +func (b *Builder) LinkAction(s *modload.State, mode, depMode BuildMode, p *load.Package) *Action { // Construct link action. 
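The action constructors in this file (vetAction, LinkAction, and linkSharedAction below) all funnel through cacheAction, so the action graph gets exactly one node per (mode, package) and stays a DAG even when many dependents request the same action. The memoization, reduced to a toy with stub types:

package actionsketch

type Package struct{ ImportPath string }

type Action struct{ Mode string }

type key struct {
	mode string
	pkg  *Package
}

type builder struct {
	cache map[key]*Action
}

// cacheAction constructs the action for (mode, pkg) at most once;
// later calls return the same node, sharing it among dependents.
func (b *builder) cacheAction(mode string, pkg *Package, f func() *Action) *Action {
	k := key{mode, pkg}
	a := b.cache[k]
	if a == nil {
		a = f()
		if b.cache == nil {
			b.cache = make(map[key]*Action)
		}
		b.cache[k] = a
	}
	return a
}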
a := b.cacheAction("link", p, func() *Action { a := &Action{ @@ -948,7 +951,7 @@ func (b *Builder) LinkAction(mode, depMode BuildMode, p *load.Package) *Action { } a.Target = a.Objdir + filepath.Join("exe", name) + cfg.ExeSuffix a.built = a.Target - b.addTransitiveLinkDeps(a, a1, "") + b.addTransitiveLinkDeps(s, a, a1, "") // Sequence the build of the main package (a1) strictly after the build // of all other dependencies that go into the link. It is likely to be after @@ -1034,7 +1037,7 @@ func (b *Builder) installAction(a1 *Action, mode BuildMode) *Action { // makes sure those are present in a.Deps. // If shlib is non-empty, then a corresponds to the build and installation of shlib, // so any rebuild of shlib should not be added as a dependency. -func (b *Builder) addTransitiveLinkDeps(a, a1 *Action, shlib string) { +func (b *Builder) addTransitiveLinkDeps(s *modload.State, a, a1 *Action, shlib string) { // Expand Deps to include all built packages, for the linker. // Use breadth-first search to find rebuilt-for-test packages // before the standard ones. @@ -1075,7 +1078,7 @@ func (b *Builder) addTransitiveLinkDeps(a, a1 *Action, shlib string) { // we'll end up building an overall library or executable that depends at runtime // on other libraries that are out-of-date, which is clearly not good either. // We call it ModeBuggyInstall to make clear that this is not right. - a.Deps = append(a.Deps, b.linkSharedAction(ModeBuggyInstall, ModeBuggyInstall, p1.Shlib, nil)) + a.Deps = append(a.Deps, b.linkSharedAction(s, ModeBuggyInstall, ModeBuggyInstall, p1.Shlib, nil)) } } } @@ -1111,26 +1114,26 @@ func (b *Builder) addInstallHeaderAction(a *Action) { // buildmodeShared takes the "go build" action a1 into the building of a shared library of a1.Deps. // That is, the input a1 represents "go build pkgs" and the result represents "go build -buildmode=shared pkgs". -func (b *Builder) buildmodeShared(mode, depMode BuildMode, args []string, pkgs []*load.Package, a1 *Action) *Action { +func (b *Builder) buildmodeShared(s *modload.State, mode, depMode BuildMode, args []string, pkgs []*load.Package, a1 *Action) *Action { name, err := libname(args, pkgs) if err != nil { base.Fatalf("%v", err) } - return b.linkSharedAction(mode, depMode, name, a1) + return b.linkSharedAction(s, mode, depMode, name, a1) } // linkSharedAction takes a grouping action a1 corresponding to a list of built packages // and returns an action that links them together into a shared library with the name shlib. // If a1 is nil, shlib should be an absolute path to an existing shared library, // and then linkSharedAction reads that library to find out the package list. -func (b *Builder) linkSharedAction(mode, depMode BuildMode, shlib string, a1 *Action) *Action { +func (b *Builder) linkSharedAction(s *modload.State, mode, depMode BuildMode, shlib string, a1 *Action) *Action { fullShlib := shlib shlib = filepath.Base(shlib) a := b.cacheAction("build-shlib "+shlib, nil, func() *Action { if a1 == nil { // TODO(rsc): Need to find some other place to store config, // not in pkg directory. See golang.org/issue/22196. - pkgs := readpkglist(fullShlib) + pkgs := readpkglist(s, fullShlib) a1 = &Action{ Mode: "shlib packages", } @@ -1144,8 +1147,8 @@ func (b *Builder) linkSharedAction(mode, depMode BuildMode, shlib string, a1 *Ac // we let them use the flags specified for the command-line arguments. 
p := &load.Package{} p.Internal.CmdlinePkg = true - p.Internal.Ldflags = load.BuildLdflags.For(p) - p.Internal.Gccgoflags = load.BuildGccgoflags.For(p) + p.Internal.Ldflags = load.BuildLdflags.For(s, p) + p.Internal.Gccgoflags = load.BuildGccgoflags.For(s, p) // Add implicit dependencies to pkgs list. // Currently buildmode=shared forces external linking mode, and @@ -1173,7 +1176,7 @@ func (b *Builder) linkSharedAction(mode, depMode BuildMode, shlib string, a1 *Ac } } var stk load.ImportStack - p := load.LoadPackageWithFlags(pkg, base.Cwd(), &stk, nil, 0) + p := load.LoadPackageWithFlags(s, pkg, base.Cwd(), &stk, nil, 0) if p.Error != nil { base.Fatalf("load %s: %v", pkg, p.Error) } @@ -1193,7 +1196,7 @@ func (b *Builder) linkSharedAction(mode, depMode BuildMode, shlib string, a1 *Ac // The linker step still needs all the usual linker deps. // (For example, the linker always opens runtime.a.) - ldDeps, err := load.LinkerDeps(nil) + ldDeps, err := load.LinkerDeps(s, nil) if err != nil { base.Error(err) } @@ -1201,7 +1204,7 @@ func (b *Builder) linkSharedAction(mode, depMode BuildMode, shlib string, a1 *Ac add(a, dep, true) } } - b.addTransitiveLinkDeps(a, a1, shlib) + b.addTransitiveLinkDeps(s, a, a1, shlib) return a }) diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go index 6741b39f051..c483c19c65b 100644 --- a/src/cmd/go/internal/work/build.go +++ b/src/cmd/go/internal/work/build.go @@ -459,16 +459,17 @@ func oneMainPkg(pkgs []*load.Package) []*load.Package { var pkgsFilter = func(pkgs []*load.Package) []*load.Package { return pkgs } func runBuild(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() - BuildInit() - b := NewBuilder("") + moduleLoaderState := modload.NewState() + moduleLoaderState.InitWorkfile() + BuildInit(moduleLoaderState) + b := NewBuilder("", moduleLoaderState.VendorDirOrEmpty) defer func() { if err := b.Close(); err != nil { base.Fatal(err) } }() - pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{AutoVCS: true}, args) + pkgs := load.PackagesAndErrors(moduleLoaderState, ctx, load.PackageOpts{AutoVCS: true}, args) load.CheckPackageErrors(pkgs) explicitO := len(cfg.BuildO) > 0 @@ -503,7 +504,7 @@ func runBuild(ctx context.Context, cmd *base.Command, args []string) { } if cfg.BuildCover { - load.PrepareForCoverageBuild(pkgs) + load.PrepareForCoverageBuild(moduleLoaderState, pkgs) } if cfg.BuildO != "" { @@ -527,7 +528,7 @@ func runBuild(ctx context.Context, cmd *base.Command, args []string) { p.Target += cfg.ExeSuffix p.Stale = true p.StaleReason = "build -o flag in use" - a.Deps = append(a.Deps, b.AutoAction(ModeInstall, depMode, p)) + a.Deps = append(a.Deps, b.AutoAction(moduleLoaderState, ModeInstall, depMode, p)) } if len(a.Deps) == 0 { base.Fatalf("go: no main packages to build") @@ -544,17 +545,17 @@ func runBuild(ctx context.Context, cmd *base.Command, args []string) { p.Target = cfg.BuildO p.Stale = true // must build - not up to date p.StaleReason = "build -o flag in use" - a := b.AutoAction(ModeInstall, depMode, p) + a := b.AutoAction(moduleLoaderState, ModeInstall, depMode, p) b.Do(ctx, a) return } a := &Action{Mode: "go build"} for _, p := range pkgs { - a.Deps = append(a.Deps, b.AutoAction(ModeBuild, depMode, p)) + a.Deps = append(a.Deps, b.AutoAction(moduleLoaderState, ModeBuild, depMode, p)) } if cfg.BuildBuildmode == "shared" { - a = b.buildmodeShared(ModeBuild, depMode, args, pkgs, a) + a = b.buildmodeShared(moduleLoaderState, ModeBuild, depMode, args, pkgs, a) } b.Do(ctx, a) } @@ -687,17 
+688,18 @@ func libname(args []string, pkgs []*load.Package) (string, error) { } func runInstall(ctx context.Context, cmd *base.Command, args []string) { + moduleLoaderState := modload.NewState() for _, arg := range args { if strings.Contains(arg, "@") && !build.IsLocalImport(arg) && !filepath.IsAbs(arg) { - installOutsideModule(ctx, args) + installOutsideModule(moduleLoaderState, ctx, args) return } } - modload.InitWorkfile() - BuildInit() - pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{AutoVCS: true}, args) - if cfg.ModulesEnabled && !modload.HasModRoot() { + moduleLoaderState.InitWorkfile() + BuildInit(moduleLoaderState) + pkgs := load.PackagesAndErrors(moduleLoaderState, ctx, load.PackageOpts{AutoVCS: true}, args) + if cfg.ModulesEnabled && !moduleLoaderState.HasModRoot() { haveErrors := false allMissingErrors := true for _, pkg := range pkgs { @@ -705,7 +707,7 @@ func runInstall(ctx context.Context, cmd *base.Command, args []string) { continue } haveErrors = true - if missingErr := (*modload.ImportMissingError)(nil); !errors.As(pkg.Error, &missingErr) { + if _, ok := errors.AsType[*modload.ImportMissingError](pkg.Error); !ok { allMissingErrors = false break } @@ -722,10 +724,10 @@ func runInstall(ctx context.Context, cmd *base.Command, args []string) { load.CheckPackageErrors(pkgs) if cfg.BuildCover { - load.PrepareForCoverageBuild(pkgs) + load.PrepareForCoverageBuild(moduleLoaderState, pkgs) } - InstallPackages(ctx, args, pkgs) + InstallPackages(moduleLoaderState, ctx, args, pkgs) } // omitTestOnly returns pkgs with test-only packages removed. @@ -745,7 +747,7 @@ func omitTestOnly(pkgs []*load.Package) []*load.Package { return list } -func InstallPackages(ctx context.Context, patterns []string, pkgs []*load.Package) { +func InstallPackages(loaderstate *modload.State, ctx context.Context, patterns []string, pkgs []*load.Package) { ctx, span := trace.StartSpan(ctx, "InstallPackages "+strings.Join(patterns, " ")) defer span.Done() @@ -783,7 +785,7 @@ func InstallPackages(ctx context.Context, patterns []string, pkgs []*load.Packag } base.ExitIfErrors() - b := NewBuilder("") + b := NewBuilder("", loaderstate.VendorDirOrEmpty) defer func() { if err := b.Close(); err != nil { base.Fatal(err) @@ -797,7 +799,7 @@ func InstallPackages(ctx context.Context, patterns []string, pkgs []*load.Packag // If p is a tool, delay the installation until the end of the build. // This avoids installing assemblers/compilers that are being executed // by other steps in the build. - a1 := b.AutoAction(ModeInstall, depMode, p) + a1 := b.AutoAction(loaderstate, ModeInstall, depMode, p) if load.InstallTargetDir(p) == load.ToTool { a.Deps = append(a.Deps, a1.Deps...) a1.Deps = append(a1.Deps, a) @@ -819,7 +821,7 @@ func InstallPackages(ctx context.Context, patterns []string, pkgs []*load.Packag // tools above did not apply, and a is just a simple Action // with a list of Deps, one per package named in pkgs, // the same as in runBuild. - a = b.buildmodeShared(ModeInstall, ModeInstall, patterns, pkgs, a) + a = b.buildmodeShared(loaderstate, ModeInstall, ModeInstall, patterns, pkgs, a) } b.Do(ctx, a) @@ -858,12 +860,12 @@ func InstallPackages(ctx context.Context, patterns []string, pkgs []*load.Packag // in the current directory or parent directories. // // See golang.org/issue/40276 for details and rationale. 
-func installOutsideModule(ctx context.Context, args []string) { - modload.ForceUseModules = true - modload.RootMode = modload.NoRoot - modload.AllowMissingModuleImports() - modload.Init() - BuildInit() +func installOutsideModule(loaderstate *modload.State, ctx context.Context, args []string) { + loaderstate.ForceUseModules = true + loaderstate.RootMode = modload.NoRoot + loaderstate.AllowMissingModuleImports() + modload.Init(loaderstate) + BuildInit(loaderstate) // Load packages. Ignore non-main packages. // Print a warning if an argument contains "..." and matches no main packages. @@ -872,7 +874,7 @@ func installOutsideModule(ctx context.Context, args []string) { // TODO(golang.org/issue/40276): don't report errors loading non-main packages // matched by a pattern. pkgOpts := load.PackageOpts{MainOnly: true} - pkgs, err := load.PackagesAndErrorsOutsideModule(ctx, pkgOpts, args) + pkgs, err := load.PackagesAndErrorsOutsideModule(loaderstate, ctx, pkgOpts, args) if err != nil { base.Fatal(err) } @@ -883,7 +885,7 @@ func installOutsideModule(ctx context.Context, args []string) { } // Build and install the packages. - InstallPackages(ctx, patterns, pkgs) + InstallPackages(loaderstate, ctx, patterns, pkgs) } // ExecCmd is the command to use to run user binaries. diff --git a/src/cmd/go/internal/work/buildid.go b/src/cmd/go/internal/work/buildid.go index 88c24b11acc..584c1ac6f41 100644 --- a/src/cmd/go/internal/work/buildid.go +++ b/src/cmd/go/internal/work/buildid.go @@ -148,9 +148,10 @@ func (b *Builder) toolID(name string) string { path := base.Tool(name) desc := "go tool " + name - // Special case: undocumented -vettool overrides usual vet, - // for testing vet or supplying an alternative analysis tool. - if name == "vet" && VetTool != "" { + // Special case: -{vet,fix}tool overrides usual cmd/{vet,fix} + // for testing or supplying an alternative analysis tool. + // (We use only "vet" terminology in the action graph.) + if name == "vet" { path = VetTool desc = VetTool } diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go index 72b9177c9db..0c9e96aebbf 100644 --- a/src/cmd/go/internal/work/exec.go +++ b/src/cmd/go/internal/work/exec.go @@ -169,9 +169,10 @@ func (b *Builder) Do(ctx context.Context, root *Action) { a.Package.Incomplete = true } } else { - var ipe load.ImportPathError - if a.Package != nil && (!errors.As(err, &ipe) || ipe.ImportPath() != a.Package.ImportPath) { - err = fmt.Errorf("%s: %v", a.Package.ImportPath, err) + if a.Package != nil { + if ipe, ok := errors.AsType[load.ImportPathError](err); !ok || ipe.ImportPath() != a.Package.ImportPath { + err = fmt.Errorf("%s: %v", a.Package.ImportPath, err) + } } sh := b.Shell(a) sh.Errorf("%s", err) @@ -1265,7 +1266,8 @@ func buildVetConfig(a *Action, srcfiles []string) { } } -// VetTool is the path to an alternate vet tool binary. +// VetTool is the path to the effective vet or fix tool binary. +// The user may specify a non-default value using -{vet,fix}tool. // The caller is expected to set it (if needed) before executing any vet actions. var VetTool string @@ -1273,7 +1275,13 @@ var VetTool string // The caller is expected to set them before executing any vet actions. var VetFlags []string -// VetExplicit records whether the vet flags were set explicitly on the command line. +// VetHandleStdout determines how the stdout output of each vet tool +// invocation should be handled. The default behavior is to copy it to +// the go command's stdout, atomically. 
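+//
+// A caller that wants the output for itself can swap in its own handler
+// before any vet actions run. A minimal sketch (illustrative only; buf
+// stands for wherever the caller collects the output):
+//
+//	work.VetHandleStdout = func(r io.Reader) error {
+//		_, err := io.Copy(&buf, r)
+//		return err
+//	}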
+var VetHandleStdout = copyToStdout
+
+// VetExplicit records whether the vet flags (which may include
+// -{vet,fix}tool) were set explicitly on the command line.
 var VetExplicit bool
 
 func (b *Builder) vet(ctx context.Context, a *Action) error {
@@ -1296,6 +1304,7 @@ func (b *Builder) vet(ctx context.Context, a *Action) error {
 
 	sh := b.Shell(a)
 
+	// We use "vet" terminology even when building action graphs for go fix.
 	vcfg.VetxOnly = a.VetxOnly
 	vcfg.VetxOutput = a.Objdir + "vet.out"
 	vcfg.Stdout = a.Objdir + "vet.stdout"
@@ -1322,7 +1331,7 @@ func (b *Builder) vet(ctx context.Context, a *Action) error {
 	// dependency tree turn on *more* analysis, as here.
 	// (The unsafeptr check does not write any facts for use by
 	// later vet runs, nor does unreachable.)
-	if a.Package.Goroot && !VetExplicit && VetTool == "" {
+	if a.Package.Goroot && !VetExplicit && VetTool == base.Tool("vet") {
 		// Turn off -unsafeptr checks.
 		// There's too much unsafe.Pointer code
 		// that vet doesn't like in low-level packages
@@ -1359,13 +1368,29 @@ func (b *Builder) vet(ctx context.Context, a *Action) error {
 			vcfg.PackageVetx[a1.Package.ImportPath] = a1.built
 		}
 	}
-	key := cache.ActionID(h.Sum())
+	vetxKey := cache.ActionID(h.Sum()) // for .vetx file
 
-	if vcfg.VetxOnly && !cfg.BuildA {
+	fmt.Fprintf(h, "stdout\n")
+	stdoutKey := cache.ActionID(h.Sum()) // for .stdout file
+
+	// Check the cache; -a forces a rebuild.
+	if !cfg.BuildA {
 		c := cache.Default()
-		if file, _, err := cache.GetFile(c, key); err == nil {
-			a.built = file
-			return nil
+		if vcfg.VetxOnly {
+			if file, _, err := cache.GetFile(c, vetxKey); err == nil {
+				a.built = file
+				return nil
+			}
+		} else {
+			// Copy the cached vet.stdout output to stdout.
+			if file, _, err := cache.GetFile(c, stdoutKey); err == nil {
+				f, err := os.Open(file)
+				if err != nil {
+					return err
+				}
+				defer f.Close() // ignore error (can't fail)
+				return VetHandleStdout(f)
+			}
 		}
 	}
 
@@ -1387,31 +1412,46 @@ func (b *Builder) vet(ctx context.Context, a *Action) error {
 	p := a.Package
 	tool := VetTool
 	if tool == "" {
-		tool = base.Tool("vet")
+		panic("VetTool unset")
 	}
-	runErr := sh.run(p.Dir, p.ImportPath, env, cfg.BuildToolexec, tool, vetFlags, a.Objdir+"vet.cfg")
-	// If vet wrote export data, save it for input to future vets.
+	if err := sh.run(p.Dir, p.ImportPath, env, cfg.BuildToolexec, tool, vetFlags, a.Objdir+"vet.cfg"); err != nil {
+		return err
+	}
+
+	// Vet tool succeeded, possibly with facts and JSON stdout. Save both in cache.
+
+	// Save facts.
 	if f, err := os.Open(vcfg.VetxOutput); err == nil {
+		defer f.Close() // ignore error
 		a.built = vcfg.VetxOutput
-		cache.Default().Put(key, f) // ignore error
-		f.Close()                   // ignore error
+		cache.Default().Put(vetxKey, f) // ignore error
 	}
 
-	// If vet wrote to stdout, copy it to go's stdout, atomically.
+	// Save stdout.
 	if f, err := os.Open(vcfg.Stdout); err == nil {
-		stdoutMu.Lock()
-		if _, err := io.Copy(os.Stdout, f); err != nil && runErr == nil {
-			runErr = fmt.Errorf("copying vet tool stdout: %w", err)
+		defer f.Close() // ignore error
+		if err := VetHandleStdout(f); err != nil {
+			return err
 		}
-		f.Close() // ignore error
-		stdoutMu.Unlock()
+		f.Seek(0, io.SeekStart)           // ignore error
+		cache.Default().Put(stdoutKey, f) // ignore error
 	}
 
-	return runErr
+	return nil
 }
 
-var stdoutMu sync.Mutex // serializes concurrent writes (e.g. JSON values) to stdout
+var stdoutMu sync.Mutex // serializes concurrent writes (of JSON values, for example) to stdout
+
+// copyToStdout copies the stream to stdout while holding the lock.
+func copyToStdout(r io.Reader) error { + stdoutMu.Lock() + defer stdoutMu.Unlock() + if _, err := io.Copy(os.Stdout, r); err != nil { + return fmt.Errorf("copying vet tool stdout: %w", err) + } + return nil +} // linkActionID computes the action ID for a link action. func (b *Builder) linkActionID(a *Action) cache.ActionID { @@ -2220,7 +2260,7 @@ func (b *Builder) ccompile(a *Action, outfile string, flags []string, file strin } else if m.Dir == "" { // The module is in the vendor directory. Replace the entire vendor // directory path, because the module's Dir is not filled in. - from = modload.VendorDir() + from = b.getVendorDir() toPath = "vendor" } else { from = m.Dir @@ -3343,7 +3383,7 @@ func (b *Builder) swigDoIntSize(objdir string) (intsize string, err error) { } srcs := []string{src} - p := load.GoFilesPackage(context.TODO(), load.PackageOpts{}, srcs) + p := load.GoFilesPackage(modload.NewState(), context.TODO(), load.PackageOpts{}, srcs) if _, _, e := BuildToolchain.gc(b, &Action{Mode: "swigDoIntSize", Package: p, Objdir: objdir}, "", nil, nil, "", false, "", srcs); e != nil { return "32", nil diff --git a/src/cmd/go/internal/work/init.go b/src/cmd/go/internal/work/init.go index e4e83dc8f98..a2954ab91ab 100644 --- a/src/cmd/go/internal/work/init.go +++ b/src/cmd/go/internal/work/init.go @@ -50,14 +50,14 @@ func makeCfgChangedEnv() []string { return slices.Clip(env) } -func BuildInit() { +func BuildInit(loaderstate *modload.State) { if buildInitStarted { base.Fatalf("go: internal error: work.BuildInit called more than once") } buildInitStarted = true base.AtExit(closeBuilders) - modload.Init() + modload.Init(loaderstate) instrumentInit() buildModeInit() cfgChangedEnv = makeCfgChangedEnv() diff --git a/src/cmd/go/internal/work/security.go b/src/cmd/go/internal/work/security.go index 3b3eba536cb..ffa83e05917 100644 --- a/src/cmd/go/internal/work/security.go +++ b/src/cmd/go/internal/work/security.go @@ -100,7 +100,6 @@ var validCompilerFlags = []*lazyregexp.Regexp{ re(`-m(abi|arch|cpu|fpu|simd|tls-dialect|tune)=([^@\-].*)`), re(`-m(no-)?v?aes`), re(`-marm`), - re(`-m(no-)?avx[0-9a-z]*`), re(`-mcmodel=[0-9a-z-]+`), re(`-mfloat-abi=([^@\-].*)`), re(`-m(soft|single|double)-float`), @@ -375,13 +374,13 @@ Args: } if i+1 < len(list) { - return fmt.Errorf("invalid flag in %s: %s %s (see https://golang.org/s/invalidflag)", source, arg, list[i+1]) + return fmt.Errorf("invalid flag in %s: %s %s (see https://go.dev/s/invalidflag)", source, arg, list[i+1]) } - return fmt.Errorf("invalid flag in %s: %s without argument (see https://golang.org/s/invalidflag)", source, arg) + return fmt.Errorf("invalid flag in %s: %s without argument (see https://go.dev/s/invalidflag)", source, arg) } } Bad: - return fmt.Errorf("invalid flag in %s: %s", source, arg) + return fmt.Errorf("invalid flag in %s: %s (see https://go.dev/s/invalidflag)", source, arg) } return nil } diff --git a/src/cmd/go/internal/work/shell.go b/src/cmd/go/internal/work/shell.go index 284ed26f223..ceff84d81f8 100644 --- a/src/cmd/go/internal/work/shell.go +++ b/src/cmd/go/internal/work/shell.go @@ -123,6 +123,11 @@ func (sh *Shell) moveOrCopyFile(dst, src string, perm fs.FileMode, force bool) e return nil } + err := checkDstOverwrite(dst, force) + if err != nil { + return err + } + // If we can update the mode and rename to the dst, do it. // Otherwise fall back to standard copy. 
@@ -193,16 +198,9 @@ func (sh *Shell) CopyFile(dst, src string, perm fs.FileMode, force bool) error {
 	}
 	defer sf.Close()
 
-	// Be careful about removing/overwriting dst.
-	// Do not remove/overwrite if dst exists and is a directory
-	// or a non-empty non-object file.
-	if fi, err := os.Stat(dst); err == nil {
-		if fi.IsDir() {
-			return fmt.Errorf("build output %q already exists and is a directory", dst)
-		}
-		if !force && fi.Mode().IsRegular() && fi.Size() != 0 && !isObject(dst) {
-			return fmt.Errorf("build output %q already exists and is not an object file", dst)
-		}
+	err = checkDstOverwrite(dst, force)
+	if err != nil {
+		return err
 	}
 
 	// On Windows, remove lingering ~ file from last attempt.
@@ -247,6 +245,21 @@ func mayberemovefile(s string) {
 	os.Remove(s)
 }
 
+// checkDstOverwrite checks whether it is safe to remove or overwrite dst.
+// It returns an error if dst exists and is a directory
+// or a non-empty non-object file.
+func checkDstOverwrite(dst string, force bool) error {
+	if fi, err := os.Stat(dst); err == nil {
+		if fi.IsDir() {
+			return fmt.Errorf("build output %q already exists and is a directory", dst)
+		}
+		if !force && fi.Mode().IsRegular() && fi.Size() != 0 && !isObject(dst) {
+			return fmt.Errorf("build output %q already exists and is not an object file", dst)
+		}
+	}
+	return nil
+}
+
 // writeFile writes the text to file.
 func (sh *Shell) writeFile(file string, text []byte) error {
 	if cfg.BuildN || cfg.BuildX {
diff --git a/src/cmd/go/internal/workcmd/edit.go b/src/cmd/go/internal/workcmd/edit.go
index 18730436ca8..b18098ba5d7 100644
--- a/src/cmd/go/internal/workcmd/edit.go
+++ b/src/cmd/go/internal/workcmd/edit.go
@@ -132,6 +132,7 @@ func init() {
 }
 
 func runEditwork(ctx context.Context, cmd *base.Command, args []string) {
+	moduleLoaderState := modload.NewState()
 	if *editJSON && *editPrint {
 		base.Fatalf("go: cannot use both -json and -print")
 	}
@@ -143,8 +144,8 @@ func runEditwork(ctx context.Context, cmd *base.Command, args []string) {
 	if len(args) == 1 {
 		gowork = args[0]
 	} else {
-		modload.InitWorkfile()
-		gowork = modload.WorkFilePath()
+		moduleLoaderState.InitWorkfile()
+		gowork = modload.WorkFilePath(moduleLoaderState)
 	}
 	if gowork == "" {
 		base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)")
 	}
diff --git a/src/cmd/go/internal/workcmd/init.go b/src/cmd/go/internal/workcmd/init.go
index 02240b8189f..896740f0803 100644
--- a/src/cmd/go/internal/workcmd/init.go
+++ b/src/cmd/go/internal/workcmd/init.go
@@ -44,11 +44,12 @@ func init() {
 }
 
 func runInit(ctx context.Context, cmd *base.Command, args []string) {
-	modload.InitWorkfile()
+	moduleLoaderState := modload.NewState()
+	moduleLoaderState.InitWorkfile()
 
-	modload.ForceUseModules = true
+	moduleLoaderState.ForceUseModules = true
 
-	gowork := modload.WorkFilePath()
+	gowork := modload.WorkFilePath(moduleLoaderState)
 	if gowork == "" {
 		gowork = filepath.Join(base.Cwd(), "go.work")
 	}
@@ -61,6 +62,6 @@ func runInit(ctx context.Context, cmd *base.Command, args []string) {
 	wf := new(modfile.WorkFile)
 	wf.Syntax = new(modfile.FileSyntax)
 	wf.AddGoStmt(goV)
-	workUse(ctx, gowork, wf, args)
+	workUse(ctx, moduleLoaderState, gowork, wf, args)
 	modload.WriteWorkFile(gowork, wf)
 }
diff --git a/src/cmd/go/internal/workcmd/sync.go b/src/cmd/go/internal/workcmd/sync.go
index 719cf76c9bf..13ce1e5f424 100644
--- a/src/cmd/go/internal/workcmd/sync.go
+++ b/src/cmd/go/internal/workcmd/sync.go
@@ -48,19 +48,20 @@ func init() {
 }
 
 func runSync(ctx context.Context, cmd *base.Command, args []string) {
-
modload.ForceUseModules = true - modload.InitWorkfile() - if modload.WorkFilePath() == "" { + moduleLoaderState := modload.NewState() + moduleLoaderState.ForceUseModules = true + moduleLoaderState.InitWorkfile() + if modload.WorkFilePath(moduleLoaderState) == "" { base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)") } - _, err := modload.LoadModGraph(ctx, "") + _, err := modload.LoadModGraph(moduleLoaderState, ctx, "") if err != nil { - toolchain.SwitchOrFatal(ctx, err) + toolchain.SwitchOrFatal(moduleLoaderState, ctx, err) } mustSelectFor := map[module.Version][]module.Version{} - mms := modload.MainModules + mms := moduleLoaderState.MainModules opts := modload.PackageOpts{ Tags: imports.AnyTags(), @@ -73,7 +74,7 @@ func runSync(ctx context.Context, cmd *base.Command, args []string) { } for _, m := range mms.Versions() { opts.MainModule = m - _, pkgs := modload.LoadPackages(ctx, opts, "all") + _, pkgs := modload.LoadPackages(moduleLoaderState, ctx, opts, "all") opts.MainModule = module.Version{} // reset var ( @@ -91,7 +92,7 @@ func runSync(ctx context.Context, cmd *base.Command, args []string) { mustSelectFor[m] = mustSelect } - workFilePath := modload.WorkFilePath() // save go.work path because EnterModule clobbers it. + workFilePath := modload.WorkFilePath(moduleLoaderState) // save go.work path because EnterModule clobbers it. var goV string for _, m := range mms.Versions() { @@ -104,7 +105,7 @@ func runSync(ctx context.Context, cmd *base.Command, args []string) { // Use EnterModule to reset the global state in modload to be in // single-module mode using the modroot of m. - modload.EnterModule(ctx, mms.ModRoot(m)) + modload.EnterModule(moduleLoaderState, ctx, mms.ModRoot(m)) // Edit the build list in the same way that 'go get' would if we // requested the relevant module versions explicitly. @@ -114,12 +115,12 @@ func runSync(ctx context.Context, cmd *base.Command, args []string) { // so we don't write some go.mods with the "before" toolchain // and others with the "after" toolchain. If nothing else, that // discrepancy could show up in auto-recorded toolchain lines. 
- changed, err := modload.EditBuildList(ctx, nil, mustSelectFor[m]) + changed, err := modload.EditBuildList(moduleLoaderState, ctx, nil, mustSelectFor[m]) if err != nil { continue } if changed { - modload.LoadPackages(ctx, modload.PackageOpts{ + modload.LoadPackages(moduleLoaderState, ctx, modload.PackageOpts{ Tags: imports.AnyTags(), Tidy: true, VendorModulesInGOROOTSrc: true, @@ -129,9 +130,9 @@ func runSync(ctx context.Context, cmd *base.Command, args []string) { SilenceMissingStdImports: true, SilencePackageErrors: true, }, "all") - modload.WriteGoMod(ctx, modload.WriteOpts{}) + modload.WriteGoMod(moduleLoaderState, ctx, modload.WriteOpts{}) } - goV = gover.Max(goV, modload.MainModules.GoVersion()) + goV = gover.Max(goV, moduleLoaderState.MainModules.GoVersion(moduleLoaderState)) } wf, err := modload.ReadWorkFile(workFilePath) diff --git a/src/cmd/go/internal/workcmd/use.go b/src/cmd/go/internal/workcmd/use.go index afbe99d3a48..041aa069e2d 100644 --- a/src/cmd/go/internal/workcmd/use.go +++ b/src/cmd/go/internal/workcmd/use.go @@ -61,9 +61,10 @@ func init() { } func runUse(ctx context.Context, cmd *base.Command, args []string) { - modload.ForceUseModules = true - modload.InitWorkfile() - gowork := modload.WorkFilePath() + moduleLoaderState := modload.NewState() + moduleLoaderState.ForceUseModules = true + moduleLoaderState.InitWorkfile() + gowork := modload.WorkFilePath(moduleLoaderState) if gowork == "" { base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)") } @@ -71,11 +72,11 @@ func runUse(ctx context.Context, cmd *base.Command, args []string) { if err != nil { base.Fatal(err) } - workUse(ctx, gowork, wf, args) + workUse(ctx, moduleLoaderState, gowork, wf, args) modload.WriteWorkFile(gowork, wf) } -func workUse(ctx context.Context, gowork string, wf *modfile.WorkFile, args []string) { +func workUse(ctx context.Context, s *modload.State, gowork string, wf *modfile.WorkFile, args []string) { workDir := filepath.Dir(gowork) // absolute, since gowork itself is absolute haveDirs := make(map[string][]string) // absolute → original(s) @@ -94,7 +95,7 @@ func workUse(ctx context.Context, gowork string, wf *modfile.WorkFile, args []st // all entries for the absolute path should be removed. 
keepDirs := make(map[string]string) - var sw toolchain.Switcher + sw := toolchain.NewSwitcher(s) // lookDir updates the entry in keepDirs for the directory dir, // which is either absolute or relative to the current working directory diff --git a/src/cmd/go/internal/workcmd/vendor.go b/src/cmd/go/internal/workcmd/vendor.go index f9f0cc08988..26715c8d3be 100644 --- a/src/cmd/go/internal/workcmd/vendor.go +++ b/src/cmd/go/internal/workcmd/vendor.go @@ -46,10 +46,11 @@ func init() { } func runVendor(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() - if modload.WorkFilePath() == "" { + moduleLoaderState := modload.NewState() + moduleLoaderState.InitWorkfile() + if modload.WorkFilePath(moduleLoaderState) == "" { base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)") } - modcmd.RunVendor(ctx, vendorE, vendorO, args) + modcmd.RunVendor(moduleLoaderState, ctx, vendorE, vendorO, args) } diff --git a/src/cmd/go/main.go b/src/cmd/go/main.go index e81969ca4a3..8cdfd9196e4 100644 --- a/src/cmd/go/main.go +++ b/src/cmd/go/main.go @@ -24,7 +24,6 @@ import ( "cmd/go/internal/clean" "cmd/go/internal/doc" "cmd/go/internal/envcmd" - "cmd/go/internal/fix" "cmd/go/internal/fmtcmd" "cmd/go/internal/generate" "cmd/go/internal/help" @@ -55,7 +54,7 @@ func init() { clean.CmdClean, doc.CmdDoc, envcmd.CmdEnv, - fix.CmdFix, + vet.CmdFix, fmtcmd.CmdFmt, generate.CmdGenerate, modget.CmdGet, diff --git a/src/cmd/go/scriptcmds_test.go b/src/cmd/go/scriptcmds_test.go index ced8d880e9a..8195e830caa 100644 --- a/src/cmd/go/scriptcmds_test.go +++ b/src/cmd/go/scriptcmds_test.go @@ -54,7 +54,8 @@ func scriptCC(cmdExec script.Cmd) script.Cmd { Args: "args...", }, func(s *script.State, args ...string) (script.WaitFunc, error) { - b := work.NewBuilder(s.Getwd()) + fakeVendorDirProvider := func() string { return "" } + b := work.NewBuilder(s.Getwd(), fakeVendorDirProvider) wait, err := cmdExec.Run(s, append(b.GccCmd(".", ""), args...)...) 
 			if err != nil {
 				return wait, err
diff --git a/src/cmd/go/scriptconds_test.go b/src/cmd/go/scriptconds_test.go
index af9691ad2a4..c87c60ad33a 100644
--- a/src/cmd/go/scriptconds_test.go
+++ b/src/cmd/go/scriptconds_test.go
@@ -14,10 +14,13 @@ import (
 	"os"
 	"os/exec"
 	"path/filepath"
+	"regexp"
 	"runtime"
 	"runtime/debug"
 	"sync"
 	"testing"
+
+	"golang.org/x/mod/semver"
 )
 
 func scriptConditions(t *testing.T) map[string]script.Cond {
@@ -41,6 +44,7 @@ func scriptConditions(t *testing.T) map[string]script.Cond {
 	add("case-sensitive", script.OnceCondition("$WORK filesystem is case-sensitive", isCaseSensitive))
 	add("cc", script.PrefixCondition("go env CC = <suffix> (ignoring the go/env file)", ccIs))
 	add("git", lazyBool("the 'git' executable exists and provides the standard CLI", hasWorkingGit))
+	add("git-min-vers", script.PrefixCondition("<suffix> indicates a minimum git version", hasAtLeastGitVersion))
 	add("net", script.PrefixCondition("can connect to external network host <suffix>", hasNet))
 	add("trimpath", script.OnceCondition("test binary was built with -trimpath", isTrimpath))
 
@@ -153,6 +157,28 @@ func hasWorkingGit() bool {
 	return err == nil
 }
 
+var gitVersLineExtract = regexp.MustCompile(`git version\s+([\d.]+)`)
+
+func gitVersion() (string, error) {
+	gitOut, runErr := exec.Command("git", "version").CombinedOutput()
+	if runErr != nil {
+		return "v0", fmt.Errorf("failed to execute git version: %w", runErr)
+	}
+	matches := gitVersLineExtract.FindSubmatch(gitOut)
+	if len(matches) < 2 {
+		return "v0", fmt.Errorf("git version extraction regexp did not match version line: %q", gitOut)
+	}
+	return "v" + string(matches[1]), nil
+}
+
+func hasAtLeastGitVersion(s *script.State, minVers string) (bool, error) {
+	gitVers, gitVersErr := gitVersion()
+	if gitVersErr != nil {
+		return false, gitVersErr
+	}
+	return semver.Compare(minVers, gitVers) <= 0, nil
+}
+
 func hasWorkingBzr() bool {
 	bzr, err := exec.LookPath("bzr")
 	if err != nil {
diff --git a/src/cmd/go/testdata/script/README b/src/cmd/go/testdata/script/README
index 7724bc10ec4..d4f4c47af70 100644
--- a/src/cmd/go/testdata/script/README
+++ b/src/cmd/go/testdata/script/README
@@ -399,6 +399,8 @@ The available conditions are:
 	GOOS/GOARCH supports -fuzz with instrumentation
 [git]
 	the 'git' executable exists and provides the standard CLI
+[git-min-vers:*]
+	<suffix> indicates a minimum git version
 [go-builder]
 	GO_BUILDER_NAME is non-empty
 [link]
diff --git a/src/cmd/go/testdata/script/build_git_sha256_go_get_branch.txt b/src/cmd/go/testdata/script/build_git_sha256_go_get_branch.txt
index fa5557b21e6..0773e08ea53 100644
--- a/src/cmd/go/testdata/script/build_git_sha256_go_get_branch.txt
+++ b/src/cmd/go/testdata/script/build_git_sha256_go_get_branch.txt
@@ -1,5 +1,6 @@
 [short] skip
 [!git] skip
+[!git-min-vers:v2.29] skip
 
 env GOPRIVATE=vcs-test.golang.org
 
diff --git a/src/cmd/go/testdata/script/build_git_sha256_moddep.txt b/src/cmd/go/testdata/script/build_git_sha256_moddep.txt
index e5bf209d895..21a296bd3d0 100644
--- a/src/cmd/go/testdata/script/build_git_sha256_moddep.txt
+++ b/src/cmd/go/testdata/script/build_git_sha256_moddep.txt
@@ -1,5 +1,6 @@
 [short] skip
 [!git] skip
+[!git-min-vers:v2.29] skip
 
 env GOPRIVATE=vcs-test.golang.org
 
diff --git a/src/cmd/go/testdata/script/build_output_overwrite.txt b/src/cmd/go/testdata/script/build_output_overwrite.txt
new file mode 100644
index 00000000000..c7b967ccec3
--- /dev/null
+++ b/src/cmd/go/testdata/script/build_output_overwrite.txt
@@ -0,0 +1,20 @@
+# Windows executables have the .exe extension, so they won't overwrite source files.
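+# (There the output would be foo.go.exe, so the refusal this test expects
+# would never trigger.)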
+[GOOS:windows] skip
+
+mkdir out
+env GOTMPDIR=$PWD/out
+
+grep 'this should still exist' foo.go
+
+! go build
+stderr 'already exists and is not an object file'
+
+grep 'this should still exist' foo.go
+
+-- go.mod --
+module foo.go
+
+-- foo.go --
+package main // this should still exist
+
+func main() {}
diff --git a/src/cmd/go/testdata/script/chdir.txt b/src/cmd/go/testdata/script/chdir.txt
index a6feed6b45f..41def410d5f 100644
--- a/src/cmd/go/testdata/script/chdir.txt
+++ b/src/cmd/go/testdata/script/chdir.txt
@@ -27,6 +27,10 @@ stderr 'strings\.test'
 go vet -C ../strings -n
 stderr strings_test
 
+# go fix
+go fix -C ../strings -n
+stderr strings_test
+
 # -C must be first on command line (as of Go 1.21)
 ! go test -n -C ../strings
 stderr '^invalid value "../strings" for flag -C: -C flag must be first flag on command line$'
diff --git a/src/cmd/go/testdata/script/fix_suite.txt b/src/cmd/go/testdata/script/fix_suite.txt
new file mode 100644
index 00000000000..455629dc172
--- /dev/null
+++ b/src/cmd/go/testdata/script/fix_suite.txt
@@ -0,0 +1,53 @@
+# Elementary test of each analyzer in the "go fix" suite.
+# This is simply to prove that they are running at all;
+# detailed behavior is tested in x/tools.
+#
+# Each assertion matches the expected diff.
+#
+# Tip: to see the actual stdout,
+# temporarily prefix the go command with "! ".
+
+go fix -diff example.com/x
+
+# buildtag
+stdout '-// \+build go1.26'
+
+# hostport
+stdout 'net.Dial.*net.JoinHostPort'
+
+# inline
+stdout 'var three = 1 \+ 2'
+
+# newexpr (proxy for whole modernize suite)
+stdout 'var _ = new\(123\)'
+
+-- go.mod --
+module example.com/x
+go 1.26
+
+-- x.go --
+//go:build go1.26
+// +build go1.26
+
+// ↑ buildtag
+
+package x
+
+import (
+	"fmt"
+	"net"
+)
+
+// hostport
+var s string
+var _, _ = net.Dial("tcp", fmt.Sprintf("%s:%d", s, 80))
+
+//go:fix inline
+func add(x, y int) int { return x + y }
+
+// inline
+var three = add(1, 2)
+
+// newexpr
+func varOf(x int) *int { return &x }
+var _ = varOf(123)
diff --git a/src/cmd/go/testdata/script/list_empty_importpath.txt b/src/cmd/go/testdata/script/list_empty_importpath.txt
index 0960a7795d1..fe4210322bb 100644
--- a/src/cmd/go/testdata/script/list_empty_importpath.txt
+++ b/src/cmd/go/testdata/script/list_empty_importpath.txt
@@ -1,6 +1,15 @@
 ! go list all
 ! stderr 'panic'
-stderr 'invalid import path'
+[!GOOS:windows] [!GOOS:solaris] [!GOOS:freebsd] [!GOOS:openbsd] [!GOOS:netbsd] stderr 'invalid import path'
+# #73976: Allow 'no errors' on Windows, Solaris, and the BSDs until the issue
+# is resolved, to prevent flakes. 'no errors' is the message printed for an
+# empty scanner.ErrorList, so that is probably where it comes from,
+# though we don't know how.
+[GOOS:windows] stderr 'invalid import path|no errors' +[GOOS:solaris] stderr 'invalid import path|no errors' +[GOOS:freebsd] stderr 'invalid import path|no errors' +[GOOS:openbsd] stderr 'invalid import path|no errors' +[GOOS:netbsd] stderr 'invalid import path|no errors' # go list produces a package for 'p' but not for '' go list -e all diff --git a/src/cmd/go/testdata/script/mod_download_git_bareRepository_sha256.txt b/src/cmd/go/testdata/script/mod_download_git_bareRepository_sha256.txt index 9e8dc3c0150..df772f5c4bb 100644 --- a/src/cmd/go/testdata/script/mod_download_git_bareRepository_sha256.txt +++ b/src/cmd/go/testdata/script/mod_download_git_bareRepository_sha256.txt @@ -1,5 +1,6 @@ [short] skip [!git] skip +[!git-min-vers:v2.29] skip # This is a git sha256-mode copy of mod_download_git_bareRepository diff --git a/src/cmd/go/testdata/script/mod_edit_issue75105.txt b/src/cmd/go/testdata/script/mod_edit_issue75105.txt new file mode 100644 index 00000000000..8984daee255 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_edit_issue75105.txt @@ -0,0 +1,36 @@ +env GO111MODULE=on + +go mod edit -godebug 'http2debug=2' +cmp go.mod go.mod.edit.want1 + +go mod edit -json +cmp stdout go.mod.edit.want2 +-- go.mod -- +module foo + +go 1.25.0 +-- go.mod.edit.want1 -- +module foo + +go 1.25.0 + +godebug http2debug=2 +-- go.mod.edit.want2 -- +{ + "Module": { + "Path": "foo" + }, + "Go": "1.25.0", + "GoDebug": [ + { + "Key": "http2debug", + "Value": "2" + } + ], + "Require": null, + "Exclude": null, + "Replace": null, + "Retract": null, + "Tool": null, + "Ignore": null +} diff --git a/src/cmd/go/testdata/script/mod_get_pseudo.txt b/src/cmd/go/testdata/script/mod_get_pseudo.txt index 47ad54e352c..8b3585107fa 100644 --- a/src/cmd/go/testdata/script/mod_get_pseudo.txt +++ b/src/cmd/go/testdata/script/mod_get_pseudo.txt @@ -1,82 +1,83 @@ env GO111MODULE=on -# Testing git->module converter's generation of +incompatible tags; turn off proxy. -[!net:github.com] skip [!git] skip +[short] skip + +# Testing git->module converter's generation of +incompatible tags; turn off proxy. env GOPROXY=direct env GOSUMDB=off # We can resolve the @master branch without unshallowing the local repository # (even with older gits), so try that before we do anything else. # (This replicates https://golang.org/issue/26713 with git 2.7.4.) -go get github.com/rsc/legacytest@master +go get vcs-test.golang.org/git/legacytest.git@master go list -m all -stdout '^github.com/rsc/legacytest v2\.0\.1-0\.\d{14}-7303f7796364\+incompatible$' +stdout '^vcs-test.golang.org/git/legacytest.git v2\.0\.1-0\.\d{14}-7303f7796364\+incompatible$' # get should include incompatible tags in "latest" calculation. 
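+# (Without them, @latest would resolve to the highest v1 tag instead of
+# v2.0.0+incompatible, which the assertions below require.)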
-go mod edit -droprequire github.com/rsc/legacytest -go get github.com/rsc/legacytest@latest +go mod edit -droprequire vcs-test.golang.org/git/legacytest.git +go get vcs-test.golang.org/git/legacytest.git@latest go list go list -m all -stdout '^github.com/rsc/legacytest v2\.0\.0\+incompatible$' +stdout '^vcs-test.golang.org/git/legacytest.git v2\.0\.0\+incompatible$' # v2.0.1-0.pseudo+incompatible -go get ...test@7303f77 +go get ...test.git@7303f77 go list -m all -stdout '^github.com/rsc/legacytest v2\.0\.1-0\.\d{14}-7303f7796364\+incompatible$' +stdout '^vcs-test.golang.org/git/legacytest.git v2\.0\.1-0\.\d{14}-7303f7796364\+incompatible$' # v2.0.0+incompatible by tag+incompatible -go get ...test@v2.0.0+incompatible +go get ...test.git@v2.0.0+incompatible go list -m all -stdout '^github.com/rsc/legacytest v2\.0\.0\+incompatible$' +stdout '^vcs-test.golang.org/git/legacytest.git v2\.0\.0\+incompatible$' # v2.0.0+incompatible by tag -go get ...test@v2.0.0 +go get ...test.git@v2.0.0 go list -m all -stdout '^github.com/rsc/legacytest v2\.0\.0\+incompatible$' +stdout '^vcs-test.golang.org/git/legacytest.git v2\.0\.0\+incompatible$' # v2.0.0+incompatible by hash (back on master) -go get ...test@d7ae1e4 +go get ...test.git@d7ae1e4 go list -m all -stdout '^github.com/rsc/legacytest v2\.0\.0\+incompatible$' +stdout '^vcs-test.golang.org/git/legacytest.git v2\.0\.0\+incompatible$' # v1.2.1-0.pseudo -go get ...test@d2d4c3e +go get ...test.git@d2d4c3e go list -m all -stdout '^github.com/rsc/legacytest v1\.2\.1-0\.\d{14}-d2d4c3ea6623$' +stdout '^vcs-test.golang.org/git/legacytest.git v1\.2\.1-0\.\d{14}-d2d4c3ea6623$' # v1.2.0 -go get ...test@9f6f860 +go get ...test.git@9f6f860 go list -m all -stdout '^github.com/rsc/legacytest v1\.2\.0$' +stdout '^vcs-test.golang.org/git/legacytest.git v1\.2\.0$' # v1.1.0-pre.0.pseudo -go get ...test@fb3c628 +go get ...test.git@fb3c628 go list -m all -stdout '^github.com/rsc/legacytest v1\.1\.0-pre\.0\.\d{14}-fb3c628075e3$' +stdout '^vcs-test.golang.org/git/legacytest.git v1\.1\.0-pre\.0\.\d{14}-fb3c628075e3$' # v1.1.0-pre (no longer on master) -go get ...test@731e3b1 +go get ...test.git@731e3b1 go list -m all -stdout '^github.com/rsc/legacytest v1\.1\.0-pre$' +stdout '^vcs-test.golang.org/git/legacytest.git v1\.1\.0-pre$' # v1.0.1-0.pseudo -go get ...test@fa4f5d6 +go get ...test.git@fa4f5d6 go list -m all -stdout '^github.com/rsc/legacytest v1\.0\.1-0\.\d{14}-fa4f5d6a71c6$' +stdout '^vcs-test.golang.org/git/legacytest.git v1\.0\.1-0\.\d{14}-fa4f5d6a71c6$' # v1.0.0 -go get ...test@7fff7f3 +go get ...test.git@7fff7f3 go list -m all -stdout '^github.com/rsc/legacytest v1\.0\.0$' +stdout '^vcs-test.golang.org/git/legacytest.git v1\.0\.0$' # v0.0.0-pseudo -go get ...test@52853eb +go get ...test.git@52853eb go list -m all -stdout '^github.com/rsc/legacytest v0\.0\.0-\d{14}-52853eb7b552$' +stdout '^vcs-test.golang.org/git/legacytest.git v0\.0\.0-\d{14}-52853eb7b552$' -- go.mod -- module x -- x.go -- package x -import "github.com/rsc/legacytest" +import "vcs-test.golang.org/git/legacytest.git" diff --git a/src/cmd/go/testdata/script/mod_get_pseudo_hg.txt b/src/cmd/go/testdata/script/mod_get_pseudo_hg.txt new file mode 100644 index 00000000000..308fa621c13 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_get_pseudo_hg.txt @@ -0,0 +1,81 @@ +env GO111MODULE=on + +[!exec:hg] skip +[short] skip + +# Testing hg->module converter's generation of +incompatible tags; turn off proxy. 
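+# (GOPROXY=direct fetches straight from the VCS server; GOSUMDB=off skips
+# checksum-database lookups for these test-only modules.)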
+env GOPROXY=direct +env GOSUMDB=off + +# get default +go get vcs-test.golang.org/hg/legacytest.hg@default +go list -m all +stdout '^vcs-test.golang.org/hg/legacytest.hg v1\.2\.1-0\.20180717164942-2840708d1294$' + +# get should include incompatible tags in "latest" calculation. +go mod edit -droprequire vcs-test.golang.org/hg/legacytest.hg +go get vcs-test.golang.org/hg/legacytest.hg@latest +go list +go list -m all +stdout '^vcs-test.golang.org/hg/legacytest.hg v2\.0\.0\+incompatible$' + +# v2.0.1-0.pseudo+incompatible +go get ...test.hg@d6ad6040 +go list -m all +stdout '^vcs-test.golang.org/hg/legacytest.hg v2\.0\.1-0\.\d{14}-d6ad604046f6\+incompatible$' + +# v2.0.0+incompatible by tag+incompatible +go get ...test.hg@v2.0.0+incompatible +go list -m all +stdout '^vcs-test.golang.org/hg/legacytest.hg v2\.0\.0\+incompatible$' + +# v2.0.0+incompatible by tag +go get ...test.hg@v2.0.0 +go list -m all +stdout '^vcs-test.golang.org/hg/legacytest.hg v2\.0\.0\+incompatible$' + +# v2.0.0+incompatible by hash (back on master) +go get ...test.hg@e64782f +go list -m all +stdout '^vcs-test.golang.org/hg/legacytest.hg v2\.0\.0\+incompatible$' + +# v1.2.1-0.pseudo +go get ...test.hg@ed9a22e +go list -m all +stdout '^vcs-test.golang.org/hg/legacytest.hg v1\.2\.1-0\.\d{14}-ed9a22ebb8a1$' + +# v1.2.0 +go get ...test.hg@07462d +go list -m all +stdout '^vcs-test.golang.org/hg/legacytest.hg v1\.2\.0$' + +# v1.1.0-pre.0.pseudo +go get ...test.hg@accb16 +go list -m all +stdout '^vcs-test.golang.org/hg/legacytest.hg v1\.1\.0-pre\.0\.\d{14}-accb169a3696$' + +# v1.1.0-pre (no longer on master) +go get ...test.hg@90da67a9 +go list -m all +stdout '^vcs-test.golang.org/hg/legacytest.hg v1\.1\.0-pre$' + +# v1.0.1-0.pseudo +go get ...test.hg@c6260a +go list -m all +stdout '^vcs-test.golang.org/hg/legacytest.hg v1\.0\.1-0\.\d{14}-c6260ab8dc3e$' + +# v1.0.0 +go get ...test.hg@d6ad17 +go list -m all +stdout '^vcs-test.golang.org/hg/legacytest.hg v1\.0\.0$' + +# v0.0.0-pseudo +go get ...test.hg@ee0106d +go list -m all +stdout '^vcs-test.golang.org/hg/legacytest.hg v0\.0\.0-\d{14}-ee0106da3c7c$' + +-- go.mod -- +module x +-- x.go -- +package x +import "vcs-test.golang.org/hg/legacytest.hg" diff --git a/src/cmd/go/testdata/script/mod_removed_godebug.txt b/src/cmd/go/testdata/script/mod_removed_godebug.txt new file mode 100644 index 00000000000..bd1f61c9d26 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_removed_godebug.txt @@ -0,0 +1,11 @@ +# Test case that makes sure we print a nice error message +# instead of the generic "unknown godebug" error message +# for removed GODEBUGs. + +! go list +stderr '^go.mod:3: use of removed godebug "x509sha1", see https://go.dev/doc/godebug#go-124$' + +-- go.mod -- +module example.com/bar + +godebug x509sha1=1 diff --git a/src/cmd/go/testdata/script/reuse_git.txt b/src/cmd/go/testdata/script/reuse_git.txt index 3c1b38b04de..faf2124db56 100644 --- a/src/cmd/go/testdata/script/reuse_git.txt +++ b/src/cmd/go/testdata/script/reuse_git.txt @@ -5,11 +5,11 @@ env GO111MODULE=on env GOPROXY=direct env GOSUMDB=off -# go mod download with the pseudo-version should invoke git but not have a TagSum or Ref. +# go mod download with the pseudo-version should invoke git but not have a TagSum or Ref or RepoSum. go mod download -x -json vcs-test.golang.org/git/hello.git@v0.0.0-20170922010558-fc3a09f3dc5c stderr 'git( .*)* fetch' cp stdout hellopseudo.json -! stdout '"(Query|TagPrefix|TagSum|Ref)"' +! 
stdout '"(Query|TagPrefix|TagSum|Ref|RepoSum)"' stdout '"Version": "v0.0.0-20170922010558-fc3a09f3dc5c"' stdout '"VCS": "git"' stdout '"URL": ".*/git/hello"' @@ -28,6 +28,7 @@ stdout '"Query": "latest"' stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="' stdout '"Ref": "HEAD"' stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"' +! stdout '"RepoSum"' # pseudo-version again should not invoke git fetch (it has the version from the @latest query) # but still be careful not to include a TagSum or a Ref, especially not Ref set to HEAD, @@ -47,6 +48,7 @@ stdout '"VCS": "git"' stdout '"URL": ".*/git/hello"' stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="' stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"' +! stdout '"RepoSum"' # go mod download vcstest/hello/v9 should fail, still print origin info ! go mod download -x -json vcs-test.golang.org/git/hello.git/v9@latest @@ -108,6 +110,7 @@ stdout '"URL": ".*/git/tagtests"' stdout '"TagSum": "t1:Dp7yRKDuE8WjG0429PN9hYWjqhy2te7P9Oki/sMEOGo="' stdout '"Ref": "refs/tags/v0.2.2"' stdout '"Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"' +! stdout '"RepoSum"' # go mod download vcstest/tagtests@v0.2.2 should print origin info, no TagSum needed go mod download -x -json vcs-test.golang.org/git/tagtests.git@v0.2.2 @@ -120,6 +123,7 @@ stdout '"URL": ".*/git/tagtests"' ! stdout '"TagSum"' stdout '"Ref": "refs/tags/v0.2.2"' stdout '"Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"' +! stdout '"RepoSum"' # go mod download vcstest/tagtests@master needs a TagSum again go mod download -x -json vcs-test.golang.org/git/tagtests.git@master @@ -132,6 +136,7 @@ stdout '"URL": ".*/git/tagtests"' stdout '"TagSum": "t1:Dp7yRKDuE8WjG0429PN9hYWjqhy2te7P9Oki/sMEOGo="' stdout '"Ref": "refs/heads/master"' stdout '"Hash": "c7818c24fa2f3f714c67d0a6d3e411c85a518d1f"' +! stdout '"RepoSum"' # go mod download vcstest/prefixtagtests should invoke git, print origin info go mod download -x -json vcs-test.golang.org/git/prefixtagtests.git/sub@latest @@ -146,6 +151,7 @@ stdout '"TagPrefix": "sub/"' stdout '"TagSum": "t1:YGSbWkJ8dn9ORAr[+]BlKHFK/2ZhXLb9hVuYfTZ9D8C7g="' stdout '"Ref": "refs/tags/sub/v0.0.10"' stdout '"Hash": "2b7c4692e12c109263cab51b416fcc835ddd7eae"' +! stdout '"RepoSum"' # go mod download of a bunch of these should fail (some are invalid) but write good JSON for later ! go mod download -json vcs-test.golang.org/git/hello.git@latest vcs-test.golang.org/git/hello.git/v9@latest vcs-test.golang.org/git/hello.git/sub/v9@latest vcs-test.golang.org/git/tagtests.git@latest vcs-test.golang.org/git/tagtests.git@v0.2.2 vcs-test.golang.org/git/tagtests.git@master @@ -158,6 +164,7 @@ stderr 'git( .*)* fetch' go clean -modcache # reuse go mod download vcstest/hello result +go clean -modcache go mod download -reuse=hello.json -x -json vcs-test.golang.org/git/hello.git@latest ! stderr 'git( .*)* fetch' stdout '"Reuse": true' @@ -168,12 +175,10 @@ stdout '"URL": ".*/git/hello"' stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="' stdout '"Ref": "HEAD"' stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"' -! stdout '"Dir"' -! stdout '"Info"' -! stdout '"GoMod"' -! stdout '"Zip"' +! stdout '"(Dir|Info|GoMod|Zip|RepoSum)"' # reuse go mod download vcstest/hello pseudoversion result +go clean -modcache go mod download -reuse=hellopseudo.json -x -json vcs-test.golang.org/git/hello.git@v0.0.0-20170922010558-fc3a09f3dc5c ! 
stderr 'git( .*)* fetch' stdout '"Reuse": true' @@ -182,9 +187,10 @@ stdout '"VCS": "git"' stdout '"URL": ".*/git/hello"' ! stdout '"(Query|TagPrefix|TagSum|Ref)"' stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"' -! stdout '"(Dir|Info|GoMod|Zip)"' +! stdout '"(Dir|Info|GoMod|Zip|RepoSum)"' # reuse go mod download vcstest/hello@hash +go clean -modcache go mod download -reuse=hellohash.json -x -json vcs-test.golang.org/git/hello.git@fc3a09f3dc5c ! stderr 'git( .*)* fetch' stdout '"Reuse": true' @@ -195,9 +201,10 @@ stdout '"URL": ".*/git/hello"' ! stdout '"(TagPrefix|Ref)"' stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="' stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"' -! stdout '"(Dir|Info|GoMod|Zip)"' +! stdout '"(Dir|Info|GoMod|Zip|RepoSum)"' # reuse go mod download vcstest/hello/v9 error result +go clean -modcache ! go mod download -reuse=hellov9.json -x -json vcs-test.golang.org/git/hello.git/v9@latest ! stderr 'git( .*)* fetch' stdout '"Reuse": true' @@ -206,9 +213,10 @@ stdout '"Error":.*no matching versions' stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="' stdout '"Ref": "HEAD"' stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"' -! stdout '"(Dir|Info|GoMod|Zip)"' +! stdout '"(Dir|Info|GoMod|Zip|RepoSum)"' # reuse go mod download vcstest/hello/sub/v9 error result +go clean -modcache ! go mod download -reuse=hellosubv9.json -x -json vcs-test.golang.org/git/hello.git/sub/v9@latest ! stderr 'git( .*)* fetch' stdout '"Reuse": true' @@ -217,9 +225,10 @@ stdout '"TagPrefix": "sub/"' stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="' stdout '"Ref": "HEAD"' stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"' -! stdout '"(Dir|Info|GoMod|Zip)"' +! stdout '"(Dir|Info|GoMod|Zip|RepoSum)"' # reuse go mod download vcstest/hello@nonexist +go clean -modcache ! go mod download -reuse=hellononexist.json -x -json vcs-test.golang.org/git/hello.git@nonexist ! stderr 'git( .*)* fetch' stdout '"Reuse": true' @@ -230,6 +239,7 @@ stdout '"RepoSum": "r1:c0/9JCZ25lxoBiK3[+]3BhACU4giH49flcJmBynJ[+]Jvmc="' ! stdout '"(Dir|Info|GoMod|Zip)"' # reuse go mod download vcstest/hello@1234567890123456789012345678901234567890 +go clean -modcache ! go mod download -reuse=hellononhash.json -x -json vcs-test.golang.org/git/hello.git@1234567890123456789012345678901234567890 ! stderr 'git( .*)* fetch' stdout '"Reuse": true' @@ -240,6 +250,7 @@ stdout '"RepoSum": "r1:c0/9JCZ25lxoBiK3[+]3BhACU4giH49flcJmBynJ[+]Jvmc="' ! stdout '"(Dir|Info|GoMod|Zip)"' # reuse go mod download vcstest/hello@v0.0.0-20220101120101-123456789abc +go clean -modcache ! go mod download -reuse=hellononpseudo.json -x -json vcs-test.golang.org/git/hello.git@v0.0.0-20220101120101-123456789abc ! stderr 'git( .*)* fetch' stdout '"Reuse": true' @@ -250,6 +261,7 @@ stdout '"RepoSum": "r1:c0/9JCZ25lxoBiK3[+]3BhACU4giH49flcJmBynJ[+]Jvmc="' ! stdout '"(Dir|Info|GoMod|Zip)"' # reuse go mod download vcstest/tagtests result +go clean -modcache go mod download -reuse=tagtests.json -x -json vcs-test.golang.org/git/tagtests.git@latest ! stderr 'git( .*)* fetch' stdout '"Reuse": true' @@ -261,9 +273,10 @@ stdout '"URL": ".*/git/tagtests"' stdout '"TagSum": "t1:Dp7yRKDuE8WjG0429PN9hYWjqhy2te7P9Oki/sMEOGo="' stdout '"Ref": "refs/tags/v0.2.2"' stdout '"Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"' -! stdout '"(Dir|Info|GoMod|Zip)"' +! 
stdout '"(Dir|Info|GoMod|Zip|RepoSum)"' # reuse go mod download vcstest/tagtests@v0.2.2 result +go clean -modcache go mod download -reuse=tagtestsv022.json -x -json vcs-test.golang.org/git/tagtests.git@v0.2.2 ! stderr 'git( .*)* fetch' stdout '"Reuse": true' @@ -275,9 +288,10 @@ stdout '"URL": ".*/git/tagtests"' ! stdout '"TagSum"' stdout '"Ref": "refs/tags/v0.2.2"' stdout '"Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"' -! stdout '"(Dir|Info|GoMod|Zip)"' +! stdout '"(Dir|Info|GoMod|Zip|RepoSum)"' # reuse go mod download vcstest/tagtests@master result +go clean -modcache go mod download -reuse=tagtestsmaster.json -x -json vcs-test.golang.org/git/tagtests.git@master ! stderr 'git( .*)* fetch' stdout '"Reuse": true' @@ -289,9 +303,10 @@ stdout '"URL": ".*/git/tagtests"' stdout '"TagSum": "t1:Dp7yRKDuE8WjG0429PN9hYWjqhy2te7P9Oki/sMEOGo="' stdout '"Ref": "refs/heads/master"' stdout '"Hash": "c7818c24fa2f3f714c67d0a6d3e411c85a518d1f"' -! stdout '"(Dir|Info|GoMod|Zip)"' +! stdout '"(Dir|Info|GoMod|Zip|RepoSum)"' # reuse go mod download vcstest/tagtests@master result again with all.json +go clean -modcache go mod download -reuse=all.json -x -json vcs-test.golang.org/git/tagtests.git@master ! stderr 'git( .*)* fetch' stdout '"Reuse": true' @@ -303,9 +318,10 @@ stdout '"URL": ".*/git/tagtests"' stdout '"TagSum": "t1:Dp7yRKDuE8WjG0429PN9hYWjqhy2te7P9Oki/sMEOGo="' stdout '"Ref": "refs/heads/master"' stdout '"Hash": "c7818c24fa2f3f714c67d0a6d3e411c85a518d1f"' -! stdout '"(Dir|Info|GoMod|Zip)"' +! stdout '"(Dir|Info|GoMod|Zip|RepoSum)"' # go mod download vcstest/prefixtagtests result with json +go clean -modcache go mod download -reuse=prefixtagtests.json -x -json vcs-test.golang.org/git/prefixtagtests.git/sub@latest ! stderr 'git( .*)* fetch' stdout '"Version": "v0.0.10"' @@ -320,12 +336,14 @@ stdout '"Hash": "2b7c4692e12c109263cab51b416fcc835ddd7eae"' ! stdout '"(Dir|Info|GoMod|Zip)"' # reuse the bulk results with all.json +go clean -modcache ! go mod download -reuse=all.json -json vcs-test.golang.org/git/hello.git@latest vcs-test.golang.org/git/hello.git/v9@latest vcs-test.golang.org/git/hello.git/sub/v9@latest vcs-test.golang.org/git/tagtests.git@latest vcs-test.golang.org/git/tagtests.git@v0.2.2 vcs-test.golang.org/git/tagtests.git@master ! stderr 'git( .*)* fetch' stdout '"Reuse": true' ! stdout '"(Dir|Info|GoMod|Zip)"' # reuse attempt with stale hash should reinvoke git, not report reuse +go clean -modcache cp tagtestsv022.json tagtestsv022badhash.json replace '57952' '56952XXX' tagtestsv022badhash.json go mod download -reuse=tagtestsv022badhash.json -x -json vcs-test.golang.org/git/tagtests.git@v0.2.2 @@ -335,7 +353,7 @@ stdout '"Version": "v0.2.2"' ! stdout '"Query"' stdout '"VCS": "git"' stdout '"URL": ".*/git/tagtests"' -! stdout '"(TagPrefix|TagSum)"' +! 
stdout '"(TagPrefix|TagSum|RepoSum)"' stdout '"Ref": "refs/tags/v0.2.2"' stdout '"Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"' stdout '"Dir"' @@ -344,6 +362,7 @@ stdout '"GoMod"' stdout '"Zip"' # reuse with stale repo URL +go clean -modcache cp tagtestsv022.json tagtestsv022badurl.json replace 'git/tagtests\"' 'git/tagtestsXXX\"' tagtestsv022badurl.json go mod download -reuse=tagtestsv022badurl.json -x -json vcs-test.golang.org/git/tagtests.git@v0.2.2 @@ -355,22 +374,107 @@ stdout '"GoMod"' stdout '"Zip"' # reuse with stale VCS +go clean -modcache cp tagtestsv022.json tagtestsv022badvcs.json replace '\"git\"' '\"gitXXX\"' tagtestsv022badvcs.json go mod download -reuse=tagtestsv022badvcs.json -x -json vcs-test.golang.org/git/tagtests.git@v0.2.2 ! stdout '"Reuse": true' stdout '"URL": ".*/git/tagtests"' +! stdout '"RepoSum"' # reuse with stale Dir +go clean -modcache cp tagtestsv022.json tagtestsv022baddir.json replace '\t\t\"Ref\":' '\t\t\"Subdir\": \"subdir\",\n\t\t\"Ref\":' tagtestsv022baddir.json go mod download -reuse=tagtestsv022baddir.json -x -json vcs-test.golang.org/git/tagtests.git@v0.2.2 ! stdout '"Reuse": true' stdout '"URL": ".*/git/tagtests"' +! stdout '"RepoSum"' # reuse with stale TagSum +go clean -modcache cp tagtests.json tagtestsbadtagsum.json replace 'sMEOGo=' 'sMEoGo=XXX' tagtestsbadtagsum.json go mod download -reuse=tagtestsbadtagsum.json -x -json vcs-test.golang.org/git/tagtests.git@latest ! stdout '"Reuse": true' stdout '"TagSum": "t1:Dp7yRKDuE8WjG0429PN9hYWjqhy2te7P9Oki/sMEOGo="' +! stdout '"RepoSum"' + +# go list on repo with no tags +go clean -modcache +go list -x -json -m -retracted -versions vcs-test.golang.org/git/hello.git@latest +stderr 'git( .*)* fetch' +cp stdout hellolist.json +! stdout '"Versions"' +stdout '"Version": "v0.0.0-20170922010558-fc3a09f3dc5c"' +stdout '"VCS": "git"' +stdout '"URL": ".*/git/hello"' +stdout '"Query": "latest"' +! stdout '"TagPrefix"' +stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="' +stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"' +! stdout '"RepoSum"' + +# reuse go list on repo with no tags +go clean -modcache +go list -x -reuse=hellolist.json -json -m -retracted -versions vcs-test.golang.org/git/hello.git@latest +! stderr 'git( .*)* fetch' +stdout '"Reuse": true' +! stdout '"Versions"' +stdout '"Version": "v0.0.0-20170922010558-fc3a09f3dc5c"' +stdout '"VCS": "git"' +stdout '"URL": ".*/git/hello"' +stdout '"Query": "latest"' +! stdout '"TagPrefix"' +stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="' +stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"' +! stdout '"RepoSum"' + +# reuse with stale list +go clean -modcache +cp hellolist.json hellolistbad.json +replace '47DEQ' 'ZZZ' hellolistbad.json +go clean -modcache +go list -x -reuse=hellolistbad.json -json -m -retracted -versions vcs-test.golang.org/git/hello.git@latest +stderr 'git( .*)* fetch' +! 
stdout '"Reuse": true' +stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="' + +# go list on repo with tags +go clean -modcache +go list -x -json -m -retracted -versions vcs-test.golang.org/git/tagtests.git@latest +cp stdout taglist.json +stderr 'git( .*)* fetch' +stdout '"Versions":' +stdout '"v0.2.1"' +stdout '"v0.2.2"' +stdout '"Version": "v0.2.2"' +stdout '"VCS": "git"' +stdout '"URL": ".*/git/tagtests"' +stdout '"Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"' +stdout '"TagSum": "t1:Dp7yRKDuE8WjG0429PN9hYWjqhy2te7P9Oki/sMEOGo="' +stdout '"Ref": "refs/tags/v0.2.2"' + +# reuse go list on repo with tags +go clean -modcache +go list -reuse=taglist.json -x -json -m -retracted -versions vcs-test.golang.org/git/tagtests.git@latest +! stderr 'git( .*)* fetch' +stdout '"Reuse": true' +stdout '"Versions":' +stdout '"v0.2.1"' +stdout '"v0.2.2"' +stdout '"Version": "v0.2.2"' +stdout '"VCS": "git"' +stdout '"URL": ".*/git/tagtests"' +stdout '"Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"' +stdout '"TagSum": "t1:Dp7yRKDuE8WjG0429PN9hYWjqhy2te7P9Oki/sMEOGo="' +stdout '"Ref": "refs/tags/v0.2.2"' + +# reuse with stale list +go clean -modcache +cp taglist.json taglistbad.json +replace 'Dp7yRKDu' 'ZZZ' taglistbad.json +go list -reuse=taglistbad.json -x -json -m -retracted -versions vcs-test.golang.org/git/tagtests.git@latest +stderr 'git( .*)* fetch' +! stdout '"Reuse": true' +stdout '"TagSum": "t1:Dp7yRKDuE8WjG0429PN9hYWjqhy2te7P9Oki/sMEOGo="' diff --git a/src/cmd/go/testdata/script/reuse_hg.txt b/src/cmd/go/testdata/script/reuse_hg.txt new file mode 100644 index 00000000000..d7637a9c552 --- /dev/null +++ b/src/cmd/go/testdata/script/reuse_hg.txt @@ -0,0 +1,471 @@ +[short] skip +[!exec:hg] skip + +env GO111MODULE=on +env GOPROXY=direct +env GOSUMDB=off + +# go mod download with the pseudo-version should invoke hg but not have a TagSum or Ref or RepoSum. +go mod download -x -json vcs-test.golang.org/hg/hello.hg@v0.0.0-20170922011414-e483a7d9f8c9 +stderr 'hg( .*)* pull' +cp stdout hellopseudo.json +! stdout '"(Query|TagPrefix|TagSum|Ref|RepoSum)"' +stdout '"Version": "v0.0.0-20170922011414-e483a7d9f8c9"' +stdout '"VCS": "hg"' +stdout '"URL": ".*/hg/hello"' +stdout '"Hash": "e483a7d9f8c9b4bc57430bdd8f81f0a65e4011c0"' +! stdout '"RepoSum"' +go clean -modcache + +# go mod download vcstest/hello should invoke hg, print origin info +go mod download -x -json vcs-test.golang.org/hg/hello.hg@latest +stderr 'hg( .*)* pull' +cp stdout hello.json +stdout '"Version": "v0.0.0-20170922011414-e483a7d9f8c9"' +stdout '"VCS": "hg"' +stdout '"URL": ".*/hg/hello"' +stdout '"Query": "latest"' +! stdout '"TagPrefix"' +! stdout '"TagSum"' +stdout '"Ref": "tip"' +stdout '"RepoSum": "r1:blLvkhBriVMV[+]6Il4Ub43wlyWXIe1NpobTelF0peaG0="' +stdout '"Hash": "e483a7d9f8c9b4bc57430bdd8f81f0a65e4011c0"' + +# pseudo-version again should not invoke hg pull (it has the version from the @latest query) +# but still be careful not to include a TagSum or a Ref, especially not Ref set to HEAD, +# which is easy to do when reusing the cached version from the @latest query. +go mod download -x -json vcs-test.golang.org/hg/hello.hg@v0.0.0-20170922011414-e483a7d9f8c9 +! stderr 'hg( .*)* pull' +cp stdout hellopseudo2.json +cmpenv hellopseudo.json hellopseudo2.json + +# go mod download hg/hello@hash needs to check RepoSum to find pseudoversion base, +# which does a refreshing hg pull. 
+go mod download -x -json vcs-test.golang.org/hg/hello.hg@e483a7d9f8c9 +stderr 'hg( .*)* pull' +cp stdout hellohash.json +stdout '"Version": "v0.0.0-20170922011414-e483a7d9f8c9"' +stdout '"Query": "e483a7d9f8c9"' +stdout '"VCS": "hg"' +stdout '"URL": ".*/hg/hello"' +stdout '"Hash": "e483a7d9f8c9b4bc57430bdd8f81f0a65e4011c0"' +stdout '"RepoSum": "r1:blLvkhBriVMV[+]6Il4Ub43wlyWXIe1NpobTelF0peaG0="' + +# go mod download vcstest/hello/v9 should fail, still print origin info +# hg uses RepoSum instead of TagSum to describe failure condition. +! go mod download -x -json vcs-test.golang.org/hg/hello.hg/v9@latest +cp stdout hellov9.json +stdout '"Version": "latest"' +stdout '"Error":.*no matching versions' +! stdout '"TagPrefix"' +! stdout '"TagSum"' +stdout '"RepoSum": "r1:blLvkhBriVMV[+]6Il4Ub43wlyWXIe1NpobTelF0peaG0="' +stdout '"Hash": "e483a7d9f8c9b4bc57430bdd8f81f0a65e4011c0"' + +# go mod download vcstest/hello/sub/v9 should also fail, print origin info +# hg uses RepoSum instead of TagSum to describe failure condition. +! go mod download -x -json vcs-test.golang.org/hg/hello.hg/sub/v9@latest +cp stdout hellosubv9.json +stdout '"Version": "latest"' +stdout '"Error":.*no matching versions' +! stdout '"TagPrefix"' +stdout '"RepoSum": "r1:blLvkhBriVMV[+]6Il4Ub43wlyWXIe1NpobTelF0peaG0="' +stdout '"Hash": "e483a7d9f8c9b4bc57430bdd8f81f0a65e4011c0"' + +# go mod download vcstest/hello@nonexist should fail, still print origin info +! go mod download -x -json vcs-test.golang.org/hg/hello.hg@nonexist +cp stdout hellononexist.json +stdout '"Version": "nonexist"' +stdout '"Error":.*unknown revision nonexist' +stdout '"RepoSum": "r1:blLvkhBriVMV[+]6Il4Ub43wlyWXIe1NpobTelF0peaG0="' +! stdout '"(TagPrefix|TagSum|Ref|Hash)"' + +# go mod download vcstest/hello@1234567890123456789012345678901234567890 should fail, still print origin info +# (40 hex digits is assumed to be a full hash and is a slightly different code path from @nonexist) +! go mod download -x -json vcs-test.golang.org/hg/hello.hg@1234567890123456789012345678901234567890 +cp stdout hellononhash.json +stdout '"Version": "1234567890123456789012345678901234567890"' +stdout '"Error":.*unknown revision 1234567890123456789012345678901234567890' +stdout '"RepoSum": "r1:blLvkhBriVMV[+]6Il4Ub43wlyWXIe1NpobTelF0peaG0="' +! stdout '"(TagPrefix|TagSum|Ref|Hash)"' + +# go mod download vcstest/hello@v0.0.0-20220101120101-123456789abc should fail, still print origin info +# (non-existent pseudoversion) +! go mod download -x -json vcs-test.golang.org/hg/hello.hg@v0.0.0-20220101120101-123456789abc +cp stdout hellononpseudo.json +stdout '"Version": "v0.0.0-20220101120101-123456789abc"' +stdout '"Error":.*unknown revision 123456789abc' +stdout '"RepoSum": "r1:blLvkhBriVMV[+]6Il4Ub43wlyWXIe1NpobTelF0peaG0="' +! stdout '"(TagPrefix|TagSum|Ref|Hash)"' + +# go mod download vcstest/tagtests should invoke hg, print origin info +# Need RepoSum to lock in tagged "latest". +go mod download -x -json vcs-test.golang.org/hg/tagtests.hg@latest +stderr 'hg( .*)* pull' +cp stdout tagtests.json +stdout '"Version": "v0.2.2"' +stdout '"Query": "latest"' +stdout '"VCS": "hg"' +stdout '"URL": ".*/hg/tagtests"' +! stdout '"TagPrefix"' +! stdout '"TagSum"' +stdout '"RepoSum": "r1:8dnv906Aq1vb9YpNl9pslfva0VfG9enKb6O6NWs0xF0="' +stdout '"Hash": "1e531550e864b16f25013cfbbf2d8e7cf07a0374"' + +# go mod download vcstest/tagtests@v0.2.2 should print origin info, no TagSum or RepoSum needed. 
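+# (The exact tag pins the revision by itself, so the checks below expect only
+# Ref and Hash in the origin info and assert that no repository-state checksum
+# is recorded.)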
+go mod download -x -json vcs-test.golang.org/hg/tagtests.hg@v0.2.2 +cp stdout tagtestsv022.json +stdout '"Version": "v0.2.2"' +! stdout '"Query":' +stdout '"VCS": "hg"' +stdout '"URL": ".*/hg/tagtests"' +! stdout '"TagPrefix"' +! stdout '"TagSum"' +! stdout '"RepoSum"' +stdout '"Ref": "v0.2.2"' +stdout '"Hash": "1e531550e864b16f25013cfbbf2d8e7cf07a0374"' + +# go mod download vcstest/tagtests@default needs a RepoSum again +go mod download -x -json vcs-test.golang.org/hg/tagtests.hg@default +cp stdout tagtestsdefault.json +stdout '"Version": "v0.2.3-0.20190509225625-8d0b18b816df"' +stdout '"Query": "default"' +stdout '"VCS": "hg"' +stdout '"URL": ".*/hg/tagtests"' +! stdout '"TagPrefix"' +! stdout '"TagSum"' +stdout '"RepoSum": "r1:8dnv906Aq1vb9YpNl9pslfva0VfG9enKb6O6NWs0xF0="' +stdout '"Hash": "8d0b18b816df5e9c564761b405b1d7949c24ee6b"' + +# go mod download vcstest/prefixtagtests should invoke hg, print origin info +go mod download -x -json vcs-test.golang.org/hg/prefixtagtests.hg/sub@latest +stderr 'hg( .*)* pull' +cp stdout prefixtagtests.json +stdout '"Version": "v0.0.10"' +stdout '"Query": "latest"' +stdout '"VCS": "hg"' +stdout '"URL": ".*/hg/prefixtagtests"' +stdout '"Subdir": "sub"' +stdout '"Ref": "sub/v0.0.10"' +! stdout '"TagPrefix"' +! stdout '"TagSum"' +stdout '"RepoSum": "r1:YWOcei109p5Kohsr5xnSYlaQXmpT3iWZHZhRbfMoTkc="' +stdout '"Hash": "1cc0dfcc254cb8901799e7f7ae182c04019b7a88"' + +# go mod download of a bunch of these should fail (some are invalid) but write good JSON for later +! go mod download -json vcs-test.golang.org/hg/hello.hg@latest vcs-test.golang.org/hg/hello.hg/v9@latest vcs-test.golang.org/hg/hello.hg/sub/v9@latest vcs-test.golang.org/hg/tagtests.hg@latest vcs-test.golang.org/hg/tagtests.hg@v0.2.2 vcs-test.golang.org/hg/tagtests.hg@default +cp stdout all.json + +# clean the module cache, make sure that makes go mod download re-run hg pull, clean again +go clean -modcache +go mod download -x -json vcs-test.golang.org/hg/hello.hg@latest +stderr 'hg( .*)* pull' +go clean -modcache + +# reuse go mod download vcstest/hello result +go clean -modcache +go mod download -reuse=hello.json -x -json vcs-test.golang.org/hg/hello.hg@latest +! stderr 'hg( .*)* pull' +stdout '"Reuse": true' +stdout '"Version": "v0.0.0-20170922011414-e483a7d9f8c9"' +stdout '"VCS": "hg"' +stdout '"URL": ".*/hg/hello"' +! stdout '"TagPrefix"' +stdout '"RepoSum": "r1:blLvkhBriVMV[+]6Il4Ub43wlyWXIe1NpobTelF0peaG0="' +stdout '"Ref": "tip"' +stdout '"Hash": "e483a7d9f8c9b4bc57430bdd8f81f0a65e4011c0"' +! stdout '"(Dir|Info|GoMod|Zip)"' + +# reuse go mod download vcstest/hello pseudoversion result +go clean -modcache +go mod download -reuse=hellopseudo.json -x -json vcs-test.golang.org/hg/hello.hg@v0.0.0-20170922011414-e483a7d9f8c9 +! stderr 'hg( .*)* pull' +stdout '"Reuse": true' +stdout '"Version": "v0.0.0-20170922011414-e483a7d9f8c9"' +stdout '"VCS": "hg"' +stdout '"URL": ".*/hg/hello"' +! stdout '"(Query|TagPrefix|TagSum|Ref)"' +stdout '"Hash": "e483a7d9f8c9b4bc57430bdd8f81f0a65e4011c0"' +! stdout '"(Dir|Info|GoMod|Zip|RepoSum)"' + +# reuse go mod download vcstest/hello@hash +go clean -modcache +go mod download -reuse=hellohash.json -x -json vcs-test.golang.org/hg/hello.hg@e483a7d9f8c9 +! stderr 'hg( .*)* pull' +stdout '"Reuse": true' +stdout '"Query": "e483a7d9f8c9"' +stdout '"Version": "v0.0.0-20170922011414-e483a7d9f8c9"' +stdout '"VCS": "hg"' +stdout '"URL": ".*/hg/hello"' +! 
stdout '"(TagPrefix|Ref)"' +stdout '"RepoSum": "r1:blLvkhBriVMV[+]6Il4Ub43wlyWXIe1NpobTelF0peaG0="' +stdout '"Hash": "e483a7d9f8c9b4bc57430bdd8f81f0a65e4011c0"' +! stdout '"(Dir|Info|GoMod|Zip)"' + +# reuse go mod download vcstest/hello/v9 error result +go clean -modcache +! go mod download -reuse=hellov9.json -x -json vcs-test.golang.org/hg/hello.hg/v9@latest +! stderr 'hg( .*)* pull' +stdout '"Reuse": true' +stdout '"Error":.*no matching versions' +! stdout '"TagPrefix"' +stdout '"RepoSum": "r1:blLvkhBriVMV[+]6Il4Ub43wlyWXIe1NpobTelF0peaG0="' +stdout '"Hash": "e483a7d9f8c9b4bc57430bdd8f81f0a65e4011c0"' +! stdout '"(Dir|Info|GoMod|Zip)"' + +# reuse go mod download vcstest/hello/sub/v9 error result +go clean -modcache +! go mod download -reuse=hellosubv9.json -x -json vcs-test.golang.org/hg/hello.hg/sub/v9@latest +! stderr 'hg( .*)* pull' +stdout '"Reuse": true' +stdout '"Error":.*no matching versions' +! stdout '"TagPrefix"' +stdout '"RepoSum": "r1:blLvkhBriVMV[+]6Il4Ub43wlyWXIe1NpobTelF0peaG0="' +stdout '"Ref": "tip"' +stdout '"Hash": "e483a7d9f8c9b4bc57430bdd8f81f0a65e4011c0"' +! stdout '"(Dir|Info|GoMod|Zip)"' + +# reuse go mod download vcstest/hello@nonexist +go clean -modcache +! go mod download -reuse=hellononexist.json -x -json vcs-test.golang.org/hg/hello.hg@nonexist +! stderr 'hg( .*)* pull' +stdout '"Reuse": true' +stdout '"Version": "nonexist"' +stdout '"Error":.*unknown revision nonexist' +stdout '"RepoSum": "r1:blLvkhBriVMV[+]6Il4Ub43wlyWXIe1NpobTelF0peaG0="' +! stdout '"(TagPrefix|TagSum|Ref|Hash)"' +! stdout '"(Dir|Info|GoMod|Zip)"' + +# reuse go mod download vcstest/hello@1234567890123456789012345678901234567890 +go clean -modcache +! go mod download -reuse=hellononhash.json -x -json vcs-test.golang.org/hg/hello.hg@1234567890123456789012345678901234567890 +! stderr 'hg( .*)* pull' +stdout '"Reuse": true' +stdout '"Version": "1234567890123456789012345678901234567890"' +stdout '"Error":.*unknown revision 1234567890123456789012345678901234567890' +stdout '"RepoSum": "r1:blLvkhBriVMV[+]6Il4Ub43wlyWXIe1NpobTelF0peaG0="' +! stdout '"(TagPrefix|TagSum|Ref|Hash)"' +! stdout '"(Dir|Info|GoMod|Zip)"' + +# reuse go mod download vcstest/hello@v0.0.0-20220101120101-123456789abc +go clean -modcache +! go mod download -reuse=hellononpseudo.json -x -json vcs-test.golang.org/hg/hello.hg@v0.0.0-20220101120101-123456789abc +! stderr 'hg( .*)* pull' +stdout '"Reuse": true' +stdout '"Version": "v0.0.0-20220101120101-123456789abc"' +stdout '"Error":.*unknown revision 123456789abc' +stdout '"RepoSum": "r1:blLvkhBriVMV[+]6Il4Ub43wlyWXIe1NpobTelF0peaG0="' +! stdout '"(TagPrefix|TagSum|Ref|Hash)"' +! stdout '"(Dir|Info|GoMod|Zip)"' + +# reuse go mod download vcstest/tagtests result +go clean -modcache +go mod download -reuse=tagtests.json -x -json vcs-test.golang.org/hg/tagtests.hg@latest +! stderr 'hg( .*)* pull' +stdout '"Reuse": true' +stdout '"Version": "v0.2.2"' +stdout '"Query": "latest"' +stdout '"VCS": "hg"' +stdout '"URL": ".*/hg/tagtests"' +! stdout '"TagPrefix"' +stdout '"RepoSum": "r1:8dnv906Aq1vb9YpNl9pslfva0VfG9enKb6O6NWs0xF0="' +stdout '"Hash": "1e531550e864b16f25013cfbbf2d8e7cf07a0374"' +! stdout '"(Dir|Info|GoMod|Zip)"' + +# reuse go mod download vcstest/tagtests@v0.2.2 result +go clean -modcache +go mod download -reuse=tagtestsv022.json -x -json vcs-test.golang.org/hg/tagtests.hg@v0.2.2 +! stderr 'hg( .*)* pull' +stdout '"Reuse": true' +stdout '"Version": "v0.2.2"' +! stdout '"Query":' +stdout '"VCS": "hg"' +stdout '"URL": ".*/hg/tagtests"' +! stdout '"TagPrefix"' +! 
stdout '"TagSum"' +stdout '"Ref": "v0.2.2"' +stdout '"Hash": "1e531550e864b16f25013cfbbf2d8e7cf07a0374"' +! stdout '"(Dir|Info|GoMod|Zip|RepoSum)"' + +# reuse go mod download vcstest/tagtests@default result +go clean -modcache +go mod download -reuse=tagtestsdefault.json -x -json vcs-test.golang.org/hg/tagtests.hg@default +! stderr 'hg( .*)* pull' +stdout '"Reuse": true' +stdout '"Version": "v0.2.3-0.20190509225625-8d0b18b816df"' +stdout '"Query": "default"' +stdout '"VCS": "hg"' +stdout '"URL": ".*/hg/tagtests"' +! stdout '"TagPrefix"' +stdout '"RepoSum": "r1:8dnv906Aq1vb9YpNl9pslfva0VfG9enKb6O6NWs0xF0="' +stdout '"Ref": "default"' +stdout '"Hash": "8d0b18b816df5e9c564761b405b1d7949c24ee6b"' +! stdout '"(Dir|Info|GoMod|Zip)"' + +# reuse go mod download vcstest/tagtests@default result again with all.json +go clean -modcache +go mod download -reuse=all.json -x -json vcs-test.golang.org/hg/tagtests.hg@default +! stderr 'hg( .*)* pull' +stdout '"Reuse": true' +stdout '"Version": "v0.2.3-0.20190509225625-8d0b18b816df"' +stdout '"Query": "default"' +stdout '"VCS": "hg"' +stdout '"URL": ".*/hg/tagtests"' +! stdout '"TagPrefix"' +stdout '"RepoSum": "r1:8dnv906Aq1vb9YpNl9pslfva0VfG9enKb6O6NWs0xF0="' +stdout '"Ref": "default"' +stdout '"Hash": "8d0b18b816df5e9c564761b405b1d7949c24ee6b"' +! stdout '"(Dir|Info|GoMod|Zip)"' + +# go mod download vcstest/prefixtagtests result with json +go clean -modcache +go mod download -reuse=prefixtagtests.json -x -json vcs-test.golang.org/hg/prefixtagtests.hg/sub@latest +! stderr 'hg( .*)* pull' +stdout '"Version": "v0.0.10"' +stdout '"Query": "latest"' +stdout '"VCS": "hg"' +stdout '"URL": ".*/hg/prefixtagtests"' +stdout '"Subdir": "sub"' +stdout '"RepoSum": "r1:YWOcei109p5Kohsr5xnSYlaQXmpT3iWZHZhRbfMoTkc="' +stdout '"Ref": "sub/v0.0.10"' +stdout '"Hash": "1cc0dfcc254cb8901799e7f7ae182c04019b7a88"' +! stdout '"(Dir|Info|GoMod|Zip)"' + +# reuse the bulk results with all.json +go clean -modcache +! go mod download -reuse=all.json -json vcs-test.golang.org/hg/hello.hg@latest vcs-test.golang.org/hg/hello.hg/v9@latest vcs-test.golang.org/hg/hello.hg/sub/v9@latest vcs-test.golang.org/hg/tagtests.hg@latest vcs-test.golang.org/hg/tagtests.hg@v0.2.2 vcs-test.golang.org/hg/tagtests.hg@default +! stderr 'hg( .*)* pull' +stdout '"Reuse": true' +! stdout '"(Dir|Info|GoMod|Zip)"' + +# reuse attempt with stale hash should reinvoke hg, not report reuse +go clean -modcache +cp tagtestsv022.json tagtestsv022badhash.json +replace '1e5315' '1e5315XXX' tagtestsv022badhash.json +go mod download -reuse=tagtestsv022badhash.json -x -json vcs-test.golang.org/hg/tagtests.hg@v0.2.2 +stderr 'hg( .*)* pull' +! stdout '"Reuse": true' +stdout '"Version": "v0.2.2"' +! stdout '"Query"' +stdout '"VCS": "hg"' +stdout '"URL": ".*/hg/tagtests"' +! stdout '"(TagPrefix|TagSum|RepoSum)"' +stdout '"Ref": "v0.2.2"' +stdout '"Hash": "1e531550e864b16f25013cfbbf2d8e7cf07a0374"' +stdout '"Dir"' +stdout '"Info"' +stdout '"GoMod"' +stdout '"Zip"' + +# reuse with stale repo URL +go clean -modcache +cp tagtestsv022.json tagtestsv022badurl.json +replace 'hg/tagtests\"' 'hg/tagtestsXXX\"' tagtestsv022badurl.json +go mod download -reuse=tagtestsv022badurl.json -x -json vcs-test.golang.org/hg/tagtests.hg@v0.2.2 +! 
stdout '"Reuse": true' +stdout '"URL": ".*/hg/tagtests"' +stdout '"Dir"' +stdout '"Info"' +stdout '"GoMod"' +stdout '"Zip"' + +# reuse with stale VCS +go clean -modcache +cp tagtestsv022.json tagtestsv022badvcs.json +replace '\"hg\"' '\"hgXXX\"' tagtestsv022badvcs.json +go mod download -reuse=tagtestsv022badvcs.json -x -json vcs-test.golang.org/hg/tagtests.hg@v0.2.2 +! stdout '"Reuse": true' +stdout '"URL": ".*/hg/tagtests"' +! stdout '"RepoSum"' + +# reuse with stale Dir +go clean -modcache +cp tagtestsv022.json tagtestsv022baddir.json +replace '\"VCS\":' '\"Subdir\":\"subdir\", \"VCS\":' tagtestsv022baddir.json +go mod download -reuse=tagtestsv022baddir.json -x -json vcs-test.golang.org/hg/tagtests.hg@v0.2.2 +! stdout '"Reuse": true' +stdout '"URL": ".*/hg/tagtests"' +! stdout '"RepoSum"' + +# reuse with stale RepoSum +go clean -modcache +cp tagtests.json tagtestsbadreposum.json +replace '8dnv90' '8dnv90XXX' tagtestsbadreposum.json +go mod download -reuse=tagtestsbadreposum.json -x -json vcs-test.golang.org/hg/tagtests.hg@latest +! stdout '"Reuse": true' +stdout '"RepoSum": "r1:8dnv906Aq1vb9YpNl9pslfva0VfG9enKb6O6NWs0xF0="' + +# go list on repo with no tags +go clean -modcache +go list -x -json -m -retracted -versions vcs-test.golang.org/hg/hello.hg@latest +stderr 'hg( .*)* pull' +cp stdout hellolist.json +! stdout '"Versions"' +stdout '"Version": "v0.0.0-20170922011414-e483a7d9f8c9"' +stdout '"VCS": "hg"' +stdout '"URL": ".*/hg/hello"' +stdout '"Query": "latest"' +stdout '"Hash": "e483a7d9f8c9b4bc57430bdd8f81f0a65e4011c0"' +stdout '"RepoSum": "r1:blLvkhBriVMV[+]6Il4Ub43wlyWXIe1NpobTelF0peaG0="' + +# reuse go list on repo with no tags +go clean -modcache +go list -x -reuse=hellolist.json -json -m -retracted -versions vcs-test.golang.org/hg/hello.hg@latest +! stderr 'hg( .*)* pull' +stdout '"Reuse": true' +! stdout '"Versions"' +stdout '"Version": "v0.0.0-20170922011414-e483a7d9f8c9"' +stdout '"VCS": "hg"' +stdout '"URL": ".*/hg/hello"' +stdout '"Query": "latest"' +stdout '"RepoSum": "r1:blLvkhBriVMV[+]6Il4Ub43wlyWXIe1NpobTelF0peaG0="' +stdout '"Hash": "e483a7d9f8c9b4bc57430bdd8f81f0a65e4011c0"' + +# reuse with stale list +go clean -modcache +cp hellolist.json hellolistbad.json +replace 'blLvkhBri' 'ZZZ' hellolistbad.json +go clean -modcache +go list -x -reuse=hellolistbad.json -json -m -retracted -versions vcs-test.golang.org/hg/hello.hg@latest +stderr 'hg( .*)* pull' +! stdout '"Reuse": true' +stdout '"RepoSum": "r1:blLvkhBriVMV[+]6Il4Ub43wlyWXIe1NpobTelF0peaG0="' + +# go list on repo with tags +go clean -modcache +go list -x -json -m -retracted -versions vcs-test.golang.org/hg/tagtests.hg@latest +cp stdout taglist.json +stderr 'hg( .*)* pull' +stdout '"Versions":' +stdout '"v0.2.1"' +stdout '"v0.2.2"' +stdout '"Version": "v0.2.2"' +stdout '"VCS": "hg"' +stdout '"URL": ".*/hg/tagtests"' +stdout '"Ref": "v0.2.2"' +stdout '"Hash": "1e531550e864b16f25013cfbbf2d8e7cf07a0374"' +stdout '"RepoSum": "r1:8dnv906Aq1vb9YpNl9pslfva0VfG9enKb6O6NWs0xF0="' + +# reuse go list on repo with tags +go clean -modcache +go list -reuse=taglist.json -x -json -m -retracted -versions vcs-test.golang.org/hg/tagtests.hg@latest +! 
stderr 'hg( .*)* pull' +stdout '"Reuse": true' +stdout '"Versions":' +stdout '"v0.2.1"' +stdout '"v0.2.2"' +stdout '"Version": "v0.2.2"' +stdout '"VCS": "hg"' +stdout '"URL": ".*/hg/tagtests"' +stdout '"Ref": "v0.2.2"' +stdout '"Hash": "1e531550e864b16f25013cfbbf2d8e7cf07a0374"' +stdout '"RepoSum": "r1:8dnv906Aq1vb9YpNl9pslfva0VfG9enKb6O6NWs0xF0="' + +# reuse with stale list +go clean -modcache +cp taglist.json taglistbad.json +replace '8dnv906' 'ZZZ' taglistbad.json +go list -reuse=taglistbad.json -x -json -m -retracted -versions vcs-test.golang.org/hg/tagtests.hg@latest +stderr 'hg( .*)* pull' +! stdout '"Reuse": true' +stdout '"RepoSum": "r1:8dnv906Aq1vb9YpNl9pslfva0VfG9enKb6O6NWs0xF0="' diff --git a/src/cmd/go/testdata/script/tool_build_as_needed.txt b/src/cmd/go/testdata/script/tool_build_as_needed.txt index 8868ed3085b..e9bb8d34f35 100644 --- a/src/cmd/go/testdata/script/tool_build_as_needed.txt +++ b/src/cmd/go/testdata/script/tool_build_as_needed.txt @@ -40,13 +40,15 @@ rm $TOOLDIR/test2json$GOEXE go tool -n test2json ! stdout $NEWTOOLDIR${/}test2json$GOEXE # Set GOOS/GOARCH to different values than host GOOS/GOARCH. -env GOOS=windows -[GOOS:windows] env GOOS=linux -env GOARCH=arm64 -[GOARCH:arm64] env GOARCH=amd64 +env GOOS=js +env GOARCH=wasm # Control case: go run shouldn't work because it respects # GOOS/GOARCH, and we can't execute non-native binary. -! go run cmd/test2json -exec='' -# But go tool should because it doesn't respect GOOS/GOARCH. +# Don't actually run the binary because maybe we can. +# (Maybe the user has a go_js_wasm_exec installed.) +# Instead just look to see that the right binary got linked. +go run -n cmd/test2json +stderr modinfo.*GOARCH=wasm.*GOOS=js +# go tool should succeed because it doesn't respect GOOS/GOARCH. go tool test2json stdout '{"Action":"start"}' diff --git a/src/cmd/go/testdata/script/vet_asm.txt b/src/cmd/go/testdata/script/vet_asm.txt index 8aa69ce1a3c..c046773a06c 100644 --- a/src/cmd/go/testdata/script/vet_asm.txt +++ b/src/cmd/go/testdata/script/vet_asm.txt @@ -1,12 +1,12 @@ -env GO111MODULE=off - # Issue 27665. Verify that "go vet" analyzes non-Go files. -[!GOARCH:amd64] skip +env GO111MODULE=off +env GOARCH=amd64 + ! go vet -asmdecl a stderr 'f: invalid MOVW of x' -# -c flag shows context +# -c=n flag shows n lines of context ! go vet -c=2 -asmdecl a stderr '...invalid MOVW...' stderr '1 .*TEXT' diff --git a/src/cmd/go/testdata/script/vet_basic.txt b/src/cmd/go/testdata/script/vet_basic.txt new file mode 100644 index 00000000000..5ae66438ea3 --- /dev/null +++ b/src/cmd/go/testdata/script/vet_basic.txt @@ -0,0 +1,125 @@ +# Test basic features of "go vet"/"go fix" CLI. +# +# The example relies on two analyzers: +# - hostport (which is included in both the fix and vet suites), and +# - printf (which is only in the vet suite). +# Each reports one diagnostic with a fix. + +# vet default flags print diagnostics to stderr. Diagnostic => nonzero exit. +! go vet example.com/x +stderr 'does not work with IPv6' +stderr 'non-constant format string in call to fmt.Sprintf' + +# -hostport runs only one analyzer. Diagnostic => failure. +! go vet -hostport example.com/x +stderr 'does not work with IPv6' +! stderr 'non-constant format string' + +# -timeformat runs only one analyzer. No diagnostics => success. +go vet -timeformat example.com/x +! stderr . + +# JSON output includes diagnostics and fixes. Always success. +go vet -json example.com/x +! stderr . 
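+# (Shape matched by the checks below, as far as this file asserts it: a JSON
+# object keyed by package path, then by analyzer name, whose diagnostics carry
+# a "message" and optional "suggested_fixes".)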
+stdout '"example.com/x": {' +stdout '"hostport":' +stdout '"message": "address format .* does not work with IPv6",' +stdout '"suggested_fixes":' +stdout '"message": "Replace fmt.Sprintf with net.JoinHostPort",' + +# vet -fix -diff displays a diff. +go vet -fix -diff example.com/x +stdout '\-var _ = fmt.Sprintf\(s\)' +stdout '\+var _ = fmt.Sprintf\("%s", s\)' +stdout '\-var _, _ = net.Dial\("tcp", fmt.Sprintf\("%s:%d", s, 80\)\)' +stdout '\+var _, _ = net.Dial\("tcp", net.JoinHostPort\(s, "80"\)\)' + +# vet -fix quietly applies the vet suite fixes. +cp x.go x.go.bak +go vet -fix example.com/x +grep 'fmt.Sprintf\("%s", s\)' x.go +grep 'net.JoinHostPort' x.go +! stderr . +cp x.go.bak x.go + +! go vet -diff example.com/x +stderr 'go vet -diff flag requires -fix' + +# go fix applies the fix suite fixes. +go fix example.com/x +grep 'net.JoinHostPort' x.go +! grep 'fmt.Sprintf\("%s", s\)' x.go +! stderr . +cp x.go.bak x.go + +# Show diff of fixes from the fix suite. +go fix -diff example.com/x +! stdout '\-var _ = fmt.Sprintf\(s\)' +stdout '\-var _, _ = net.Dial\("tcp", fmt.Sprintf\("%s:%d", s, 80\)\)' +stdout '\+var _, _ = net.Dial\("tcp", net.JoinHostPort\(s, "80"\)\)' + +# Show fix-suite fixes in JSON form. +go fix -json example.com/x +! stderr . +stdout '"example.com/x": {' +stdout '"hostport":' +stdout '"message": "address format .* does not work with IPv6",' +stdout '"suggested_fixes":' +stdout '"message": "Replace fmt.Sprintf with net.JoinHostPort",' +! stdout '"printf":' +! stdout '"message": "non-constant format string.*",' +! stdout '"message": "Insert.*%s.*format.string",' + +# Show vet-suite fixes in JSON form. +go vet -fix -json example.com/x +! stderr . +stdout '"example.com/x": {' +stdout '"hostport":' +stdout '"message": "address format .* does not work with IPv6",' +stdout '"suggested_fixes":' +stdout '"message": "Replace fmt.Sprintf with net.JoinHostPort",' +stdout '"printf":' +stdout '"message": "non-constant format string.*",' +stdout '"suggested_fixes":' +stdout '"message": "Insert.*%s.*format.string",' + +# Reject -diff + -json. +! go fix -diff -json example.com/x +stderr '-json and -diff cannot be used together' + +# Legacy way of selecting fixers is a no-op. +go fix -fix=old1,old2 example.com/x +stderr 'go fix: the -fix=old1,old2 flag is obsolete and has no effect' + +# -c=n flag shows n lines of context. +! go vet -c=2 -printf example.com/x +stderr 'x.go:12:21: non-constant format string in call to fmt.Sprintf' +! stderr '9 ' +stderr '10 ' +stderr '11 // This call...' +stderr '12 var _ = fmt.Sprintf\(s\)' +stderr '13 ' +stderr '14 ' +! stderr '15 ' + +-- go.mod -- +module example.com/x +go 1.25 + +-- x.go -- +package x + + +import ( + "fmt" + "net" +) + +var s string + +// This call yields a "non-constant format string" diagnostic, with a fix (go vet only). +var _ = fmt.Sprintf(s) + +// This call yields a hostport diagnostic, with a fix (go vet and go fix). +var _, _ = net.Dial("tcp", fmt.Sprintf("%s:%d", s, 80)) diff --git a/src/cmd/go/testdata/script/vet_cache.txt b/src/cmd/go/testdata/script/vet_cache.txt new file mode 100644 index 00000000000..c84844000a4 --- /dev/null +++ b/src/cmd/go/testdata/script/vet_cache.txt @@ -0,0 +1,24 @@ +# Test that go vet's caching of vet tool actions replays +# the recorded stderr output even after a cache hit. + +# Set up fresh GOCACHE. +env GOCACHE=$WORK/gocache + +# First time is a cache miss. +! 
go vet example.com/a +stderr 'fmt.Sprint call has possible Printf formatting directive' + +# Second time is assumed to be a cache hit for the stdout JSON, +# but we don't bother to assert it. Same diagnostics again. +! go vet example.com/a +stderr 'fmt.Sprint call has possible Printf formatting directive' + +-- go.mod -- +module example.com + +-- a/a.go -- +package a + +import "fmt" + +var _ = fmt.Sprint("%s") // oops! diff --git a/src/cmd/go/testdata/script/vet_flags.txt b/src/cmd/go/testdata/script/vet_flags.txt index 21606dc4f1d..e274dbedb39 100644 --- a/src/cmd/go/testdata/script/vet_flags.txt +++ b/src/cmd/go/testdata/script/vet_flags.txt @@ -20,7 +20,8 @@ stderr '-unsafeptr' ! stderr '-unsafeptr=false' # -unreachable is disabled during test but on during plain vet. -go test -n runtime +# The -a makes sure the vet result is not cached, or else we won't print the command line. +go test -a -n runtime stderr '-unreachable=false' # A flag terminator should be allowed before the package list. @@ -63,16 +64,16 @@ go test -n -vet= -run=none . stderr '[/\\]vet'$GOEXE'["]? .* -errorsas .* ["]?\$WORK[/\\][^ ]*[/\\]vet\.cfg' # "go test" on a standard package should by default disable an explicit list. -go test -n -run=none encoding/binary +go test -a -n -run=none encoding/binary stderr '[/\\]vet'$GOEXE'["]? -unsafeptr=false -unreachable=false ["]?\$WORK[/\\][^ ]*[/\\]vet\.cfg' -go test -n -vet= -run=none encoding/binary +go test -a -n -vet= -run=none encoding/binary stderr '[/\\]vet'$GOEXE'["]? -unsafeptr=false -unreachable=false ["]?\$WORK[/\\][^ ]*[/\\]vet\.cfg' # Both should allow users to override via the -vet flag. -go test -n -vet=unreachable -run=none . +go test -a -n -vet=unreachable -run=none . stderr '[/\\]vet'$GOEXE'["]? -unreachable ["]?\$WORK[/\\][^ ]*[/\\]vet\.cfg' -go test -n -vet=unreachable -run=none encoding/binary +go test -a -n -vet=unreachable -run=none encoding/binary stderr '[/\\]vet'$GOEXE'["]? 
-unreachable ["]?\$WORK[/\\][^ ]*[/\\]vet\.cfg' -- go.mod -- diff --git a/src/cmd/go/testdata/vcstest/git/gitrepo-sha256.txt b/src/cmd/go/testdata/vcstest/git/gitrepo-sha256.txt index 81b9a71c125..15068a249ef 100644 --- a/src/cmd/go/testdata/vcstest/git/gitrepo-sha256.txt +++ b/src/cmd/go/testdata/vcstest/git/gitrepo-sha256.txt @@ -1,3 +1,5 @@ +[!git-min-vers:v2.29] skip + handle git # This is a sha256 version of gitrepo1.txt (which uses sha1 hashes) diff --git a/src/cmd/go/testdata/vcstest/git/legacytest.txt b/src/cmd/go/testdata/vcstest/git/legacytest.txt new file mode 100644 index 00000000000..5846983cef3 --- /dev/null +++ b/src/cmd/go/testdata/vcstest/git/legacytest.txt @@ -0,0 +1,118 @@ +handle git + +env GIT_AUTHOR_NAME='Russ Cox' +env GIT_AUTHOR_EMAIL='rsc@golang.org' +env GIT_COMMITTER_NAME=$GIT_AUTHOR_NAME +env GIT_COMMITTER_EMAIL=$GIT_AUTHOR_EMAIL + +git init +git branch -M master + +at 2018-07-17T12:41:39-04:00 +cp x_cf92c7b.go x.go +git add x.go +git commit -m 'initial commit' + +at 2018-07-17T12:41:57-04:00 +cp x_52853eb.go x.go +git commit -m 'add X' x.go + +at 2018-07-17T12:42:07-04:00 +cp x_7fff7f3.go x.go +git commit -m 'gofmt' x.go +git tag v1.0.0 + +at 2018-07-17T12:42:28-04:00 +cp x_fa4f5d6.go x.go +git commit -m 'X->XX' x.go + +at 2018-07-17T12:42:36-04:00 +cp x_d7ae1e4.go x.go +git commit -m 'gofmt' x.go +git tag v2.0.0 + +at 2018-07-17T12:42:53-04:00 +cp x_7303f77.go x.go +git commit -m 'add XXX' x.go + +at 2018-07-17T12:47:59-04:00 +git checkout v1.0.0 +cp x_1abc5ff.go x.go +git commit -m 'comment' x.go + +at 2018-07-17T12:48:22-04:00 +cp x_731e3b1.go x.go +git commit -m 'prerelease' x.go +git tag v1.1.0-pre + +at 2018-07-17T12:48:49-04:00 +cp x_fb3c628.go x.go +git commit -m 'working' x.go + +at 2018-07-17T12:49:05-04:00 +cp x_9f6f860.go x.go +git commit -m 'v1.2.0' x.go +git tag v1.2.0 + +at 2018-07-17T12:49:42-04:00 +cp x_d2d4c3e.go x.go +git commit -m 'more' x.go +git tag morework + +git show-ref --tags --heads +cmp stdout .git-refs + +-- .git-refs -- +7303f77963648d5f1ec5e55eccfad8e14035866c refs/heads/master +d2d4c3ea66230e7ad6fbd8f0ecd8c0f851392364 refs/tags/morework +7fff7f3417faa4a795f9518bc2bef05147a1d6c0 refs/tags/v1.0.0 +731e3b12a0272dcafb560b8fa6a4e9ffb20ef5c9 refs/tags/v1.1.0-pre +9f6f860fe5c92cd835fdde2913aca8db9ce63373 refs/tags/v1.2.0 +d7ae1e4b368320e7a577fc8a9efc1e78aacac52a refs/tags/v2.0.0 +-- x_1abc5ff.go -- +package legacytest + +// add comment +const X = 1 +-- x_52853eb.go -- +package legacytest +const X = 1 +-- x_7303f77.go -- +package legacytest + +const XX = 2 + +const XXX = 3 +-- x_731e3b1.go -- +package legacytest + +// add comment again +const X = 1 +-- x_7fff7f3.go -- +package legacytest + +const X = 1 +-- x_9f6f860.go -- +package legacytest + +// add comment again!!! 
+const X = 1 +-- x_cf92c7b.go -- +package legacytest +-- x_d2d4c3e.go -- +package legacytest + +// add comment hack hack hack +const X = 1 +-- x_d7ae1e4.go -- +package legacytest + +const XX = 2 +-- x_fa4f5d6.go -- +package legacytest +const XX = 2 +-- x_fb3c628.go -- +package legacytest + +// add comment fixed +const X = 1 diff --git a/src/cmd/go/testdata/vcstest/hg/hgrepo1.txt b/src/cmd/go/testdata/vcstest/hg/hgrepo1.txt index 1e4b83aae6c..42d81e9d39a 100644 --- a/src/cmd/go/testdata/vcstest/hg/hgrepo1.txt +++ b/src/cmd/go/testdata/vcstest/hg/hgrepo1.txt @@ -1,153 +1,97 @@ handle hg -mkdir git -cd git +hg init -env GIT_AUTHOR_NAME='Russ Cox' -env GIT_AUTHOR_EMAIL='rsc@golang.org' -env GIT_COMMITTER_NAME=$GIT_AUTHOR_NAME -env GIT_COMMITTER_EMAIL=$GIT_AUTHOR_EMAIL - -git init - -at 2018-04-17T15:43:22-04:00 +env date=2018-04-17T15:43:22-04:00 unquote '' cp stdout README -git add README -git commit -a -m 'empty README' -git branch -m master -git tag v1.2.3 +hg add README +hg commit --user=rsc --date=$date -m 'empty README' +hg branch tagbranch +hg tag --user=rsc --date=$date v1.2.3 +hg update default -at 2018-04-17T15:45:48-04:00 -git branch v2 -git checkout v2 +env date=2018-04-17T15:45:48-04:00 +hg branch v2 echo 'v2' cp stdout v2 -git add v2 -git commit -a -m 'v2' -git tag v2.3 -git tag v2.0.1 -git branch v2.3.4 -git tag branch-v2.3.4 +hg add v2 +hg commit --user=rsc --date=$date -m 'v2' +hg update tagbranch +hg tag --user=rsc --date=$date -r v2 v2.3 +hg tag --user=rsc --date=$date -r v2 v2.0.1 +hg update v2 +hg branch v2.3.4 -at 2018-04-17T16:00:19-04:00 +env date=2018-04-17T16:00:19-04:00 echo 'intermediate' cp stdout foo.txt -git add foo.txt -git commit -a -m 'intermediate' +hg add foo.txt +hg commit --user=rsc --date=$date -m 'intermediate' -at 2018-04-17T16:00:32-04:00 +env date=2018-04-17T16:00:32-04:00 echo 'another' cp stdout another.txt -git add another.txt -git commit -a -m 'another' -git tag v2.0.2 -git tag branch-v2 +hg add another.txt +hg commit --user=rsc --date=$date -m 'another' +hg update tagbranch +hg tag --user=rsc --date=$date -r v2.3.4 v2.0.2 -at 2018-04-17T16:16:52-04:00 -git checkout master -git branch v3 -git checkout v3 +env date=2018-04-17T16:16:52-04:00 +hg update default +hg branch v3 mkdir v3/sub/dir echo 'v3/sub/dir/file' cp stdout v3/sub/dir/file.txt -git add v3 -git commit -a -m 'add v3/sub/dir/file.txt' -git tag branch-v3 +hg add v3 +hg commit --user=rsc --date=$date -m 'add v3/sub/dir/file.txt' -at 2018-04-17T22:23:00-04:00 -git checkout master -git tag -a v1.2.4-annotated -m 'v1.2.4-annotated' +env date=2018-04-17T22:23:00-04:00 +hg update default +hg tag --user=rsc --date=$date -r v1.2.3 v1.2.4-annotated -cd .. - -hg init -hg convert --datesort ./git . -rm ./git - -hg update -C v2 -hg branch v2 +env date=2018-06-27T12:15:24-04:00 +hg update v2 unquote '' cp stdout dummy hg add dummy -hg commit --user 'Russ Cox ' --date '2018-06-27T12:15:24-04:00' -m 'dummy' - -# 'hg convert' blindly stamps a tag-update commit at the end of whatever branch -# happened to contain the last converted commit — in this case, v3. However, the -# original vcs-test.golang.org copy of this repo had this commit on the v3 -# branch as a descendent of 'add v3/sub/dir/file.txt', so that's where we put it -# here. That leaves the convert-repo 'update tags' commit only reachable as the -# head of the default branch. 
-hg update -r 4 - -hg branch v3 -unquote '' -cp stdout dummy -hg add dummy -hg commit --user 'Russ Cox ' --date '2018-06-27T12:15:45-04:00' -m 'dummy' +hg commit --user=rsc --date=$date -m 'dummy' +env date=2018-06-27T12:16:10-04:00 hg update v2.3.4 hg branch v2.3.4 unquote '' cp stdout dummy hg add dummy -hg commit --user 'Russ Cox ' --date '2018-06-27T12:16:10-04:00' -m 'dummy' +hg commit --user=rsc --date=$date -m 'dummy' -hg tag --user 'Russ Cox ' --date '2018-06-27T12:16:30-04:00' -m 'Removed tag branch-v2, branch-v3, branch-v2.3.4' --remove branch-v2 branch-v3 branch-v2.3.4 - -# Adding commits to the above branches updates both the branch heads and the -# corresponding bookmarks. -# But apparently at some point it did not do so? The original copy of this repo -# had bookmarks pointing to the base of each branch instead of the tip. 🤔 -# Either way, force the bookmarks we care about to match the original copy of -# the repo. -hg book v2 -r 3 --force -hg book v2.3.4 -r 1 --force -hg book v3 -r 5 --force +hg book v2 -r v2.0.2 --force +hg book v2.3.4 -r v2.0.1 --force hg log -G --debug hg tags cmp stdout .hg-tags - # 'hg convert' leaves an 'update tags' commit on the default branch, and that - # commit always uses the current date (so is not reproducible). Fortunately, - # that commit lands on the 'default' branch and is not tagged as 'tip', so it - # seems to be mostly harmless. However, because it is nondeterministic we - # should avoid listing it here. - # - # Unfortunately, some of our builders are still running Debian 9 “Stretch”, - # which shipped with a version of 'hg' that does not support 'hg branch -r' - # to list branches for specific versions. Although Stretch is past its - # end-of-life date, we need to keep the builders happy until they can be - # turned down (https://go.dev/issue/56414). hg branches -? cmp stdout .hg-branches -stdout 'v2\s+6:9a4f43d231ec' -stdout 'v2.3.4\s+9:18518c07eb8e' -stdout 'v3\s+7:a2cad8a2b1bb' -stdout 'default\s+5:' +cmp stdout .hg-branches -# Likewise, bookmark v3 ends up on the nondeterministic commit. hg bookmarks -? 
cmp stdout .hg-bookmarks -stdout 'master\s+0:41964ddce118' -stdout 'v2\s+3:8f49ee7a6ddc' -stdout 'v2.3.4\s+1:88fde824ec8b' -stdout 'v3\s+5:.*' +cmp stdout .hg-bookmarks --- .hg-branches -- -v2.3.4 9:18518c07eb8e -v3 7:a2cad8a2b1bb -v2 6:9a4f43d231ec -- .hg-tags -- -tip 9:18518c07eb8e -v2.0.2 3:8f49ee7a6ddc -v2.3 1:88fde824ec8b -v2.0.1 1:88fde824ec8b -v1.2.4-annotated 0:41964ddce118 -v1.2.3 0:41964ddce118 +tip 11:745aacc8b24d +v2.0.2 6:b1ed98abc268 +v2.3 2:a546811101e1 +v2.0.1 2:a546811101e1 +v1.2.4-annotated 0:c0186fb00e50 +v1.2.3 0:c0186fb00e50 +-- .hg-branches -- +v2.3.4 11:745aacc8b24d +v2 10:2b5ca8689628 +default 9:a9a2a32d1392 +v3 8:442174d28f65 +tagbranch 7:1a3473c317b4 -- .hg-bookmarks -- - master 0:41964ddce118 - v2 3:8f49ee7a6ddc - v2.3.4 1:88fde824ec8b + v2 6:b1ed98abc268 + v2.3.4 2:a546811101e1 diff --git a/src/cmd/go/testdata/vcstest/hg/legacytest.txt b/src/cmd/go/testdata/vcstest/hg/legacytest.txt new file mode 100644 index 00000000000..c3f063e2fa2 --- /dev/null +++ b/src/cmd/go/testdata/vcstest/hg/legacytest.txt @@ -0,0 +1,124 @@ +handle hg + +env user='Russ Cox ' + +hg init + +env date=2018-07-17T12:41:39-04:00 +cp x_cf92c7b.go x.go +hg add x.go +hg commit --user=$user --date=$date -m 'initial commit' + +env date=2018-07-17T12:41:57-04:00 +cp x_52853eb.go x.go +hg commit --user=$user --date=$date -m 'add X' x.go + +env date=2018-07-17T12:42:07-04:00 +cp x_7fff7f3.go x.go +hg commit --user=$user --date=$date -m 'gofmt' x.go +hg tag --user=$user --date=$date v1.0.0 + +env date=2018-07-17T12:42:28-04:00 +cp x_fa4f5d6.go x.go +hg commit --user=$user --date=$date -m 'X->XX' x.go + +env date=2018-07-17T12:42:36-04:00 +cp x_d7ae1e4.go x.go +hg commit --user=$user --date=$date -m 'gofmt' x.go +hg tag --user=$user --date=$date v2.0.0 + +env date=2018-07-17T12:42:53-04:00 +cp x_7303f77.go x.go +hg commit --user=$user --date=$date -m 'add XXX' x.go + +env date=2018-07-17T12:47:59-04:00 +hg update v1.0.0 +cp x_1abc5ff.go x.go +hg commit --user=$user --date=$date -m 'comment' x.go + +env date=2018-07-17T12:48:22-04:00 +cp x_731e3b1.go x.go +hg commit --user=$user --date=$date -m 'prerelease' x.go +hg tag --user=$user --date=$date v1.1.0-pre + +env date=2018-07-17T12:48:49-04:00 +cp x_fb3c628.go x.go +hg commit --user=$user --date=$date -m 'working' x.go + +env date=2018-07-17T12:49:05-04:00 +cp x_9f6f860.go x.go +hg commit --user=$user --date=$date -m 'v1.2.0' x.go +hg tag --user=$user --date=$date v1.2.0 + +env date=2018-07-17T12:49:42-04:00 +cp x_d2d4c3e.go x.go +hg commit --user=$user --date=$date -m 'more' x.go +hg tag --user=$user --date=$date morework + +hg log -r ':' --template '{node|short} {desc|strip|firstline}\n' +cmp stdout .hg-log + +-- .hg-log -- +9dc9138de2e5 initial commit +ee0106da3c7c add X +d6ad170f61d4 gofmt +90c54d4351ee Added tag v1.0.0 for changeset d6ad170f61d4 +c6260ab8dc3e X->XX +e64782fcadfd gofmt +d6ad604046f6 Added tag v2.0.0 for changeset e64782fcadfd +663753d3ac63 add XXX +4555a6dd66c0 comment +90da67a9bf0c prerelease +d7c15fbd635d Added tag v1.1.0-pre for changeset 90da67a9bf0c +accb169a3696 working +07462d11385f v1.2.0 +ed9a22ebb8a1 Added tag v1.2.0 for changeset 07462d11385f +498b291aa133 more +2840708d1294 Added tag morework for changeset 498b291aa133 +-- x_1abc5ff.go -- +package legacytest + +// add comment +const X = 1 +-- x_52853eb.go -- +package legacytest +const X = 1 +-- x_7303f77.go -- +package legacytest + +const XX = 2 + +const XXX = 3 +-- x_731e3b1.go -- +package legacytest + +// add comment again +const X = 1 +-- x_7fff7f3.go -- +package 
legacytest + +const X = 1 +-- x_9f6f860.go -- +package legacytest + +// add comment again!!! +const X = 1 +-- x_cf92c7b.go -- +package legacytest +-- x_d2d4c3e.go -- +package legacytest + +// add comment hack hack hack +const X = 1 +-- x_d7ae1e4.go -- +package legacytest + +const XX = 2 +-- x_fa4f5d6.go -- +package legacytest +const XX = 2 +-- x_fb3c628.go -- +package legacytest + +// add comment fixed +const X = 1 diff --git a/src/cmd/go/testdata/vcstest/hg/prefixtagtests.txt b/src/cmd/go/testdata/vcstest/hg/prefixtagtests.txt new file mode 100644 index 00000000000..c61c9bacae9 --- /dev/null +++ b/src/cmd/go/testdata/vcstest/hg/prefixtagtests.txt @@ -0,0 +1,52 @@ +env date=2019-05-09T18:35:00-04:00 + +handle hg + +hg init +hg add sub +hg commit -u rsc -d $date -m 'create module sub' + +echo v0.1.0 +cp stdout status +hg add status +hg commit -u rsc -d $date -m v0.1.0 +hg tag -u rsc -d $date v0.1.0 + +echo sub/v0.0.9 +cp stdout status +hg add status +hg commit -u rsc -d $date -m sub/v0.0.9 +hg tag -u rsc -d $date sub/v0.0.9 + +echo sub/v0.0.10 +cp stdout status +hg commit -u rsc -d $date -m sub/v0.0.10 status +hg tag -u rsc -d $date sub/v0.0.10 + +echo v0.2.0 +cp stdout status +hg commit -u rsc -d $date -m v0.2.0 +hg tag -u rsc -d $date v0.2.0 + +echo 'after last tag' +cp stdout status +hg commit -u rsc -d $date -m 'after last tag' + +hg tags +cmp stdout .hg-tags + +hg branches +cmp stdout .hg-branches + +-- .hg-tags -- +tip 9:840814f739c2 +v0.2.0 7:84e452ea2b0a +sub/v0.0.10 5:1cc0dfcc254c +sub/v0.0.9 3:c5f5e3168705 +v0.1.0 1:d6ba12969a9b +-- .hg-branches -- +default 9:840814f739c2 +-- sub/go.mod -- +module vcs-test.golang.org/git/prefixtagtests.git/sub +-- sub/sub.go -- +package sub diff --git a/src/cmd/go/testdata/vcstest/hg/tagtests.txt b/src/cmd/go/testdata/vcstest/hg/tagtests.txt new file mode 100644 index 00000000000..38b3e97ef9b --- /dev/null +++ b/src/cmd/go/testdata/vcstest/hg/tagtests.txt @@ -0,0 +1,38 @@ +env date=2019-05-09T18:56:25-04:00 + +handle hg + +hg init +hg add go.mod tagtests.go +hg commit --user 'rsc' --date $date -m 'create module tagtests' +hg branch b +hg add v0.2.1 +hg commit --user 'rsc' --date $date -m 'v0.2.1' +hg tag --user 'rsc' --date $date v0.2.1 + +hg update default +hg add v0.2.2 +hg commit --user 'rsc' --date $date -m 'v0.2.2' +hg tag --user 'rsc' --date $date v0.2.2 + +hg tags +cmp stdout .hg-tags + +hg branches +cmp stdout .hg-branches + +-- go.mod -- +module vcs-test.golang.org/git/tagtests.git +-- tagtests.go -- +package tagtests +-- v0.2.1 -- +v0.2.1 +-- v0.2.2 -- +v0.2.2 +-- .hg-tags -- +tip 4:8d0b18b816df +v0.2.2 3:1e531550e864 +v0.2.1 1:010a2d1a2ea7 +-- .hg-branches -- +default 4:8d0b18b816df +b 2:ceae444ffda5 diff --git a/src/cmd/gofmt/gofmt.go b/src/cmd/gofmt/gofmt.go index bbb8b4fd15c..ad6ad636524 100644 --- a/src/cmd/gofmt/gofmt.go +++ b/src/cmd/gofmt/gofmt.go @@ -41,6 +41,9 @@ var ( // debugging cpuprofile = flag.String("cpuprofile", "", "write cpu profile to this file") + + // errors + errFormattingDiffers = fmt.Errorf("formatting differs from gofmt's") ) // Keep these in sync with go/format/format.go. 
@@ -218,8 +221,12 @@ func (r *reporter) Report(err error) { panic("Report with nil error") } st := r.getState() - scanner.PrintError(st.err, err) - st.exitCode = 2 + if err == errFormattingDiffers { + st.exitCode = 1 + } else { + scanner.PrintError(st.err, err) + st.exitCode = 2 + } } func (r *reporter) ExitCode() int { @@ -281,6 +288,7 @@ func processFile(filename string, info fs.FileInfo, in io.Reader, r *reporter) e newName := filepath.ToSlash(filename) oldName := newName + ".orig" r.Write(diff.Diff(oldName, src, newName, res)) + return errFormattingDiffers } } diff --git a/src/cmd/gofmt/gofmt_test.go b/src/cmd/gofmt/gofmt_test.go index 6b80673af14..2aba0f03ff0 100644 --- a/src/cmd/gofmt/gofmt_test.go +++ b/src/cmd/gofmt/gofmt_test.go @@ -53,10 +53,19 @@ func gofmtFlags(filename string, maxLines int) string { return "" } -func runTest(t *testing.T, in, out string) { - // process flags - *simplifyAST = false +// Reset global variables for all flags to their default value. +func resetFlags() { + *list = false + *write = false *rewriteRule = "" + *simplifyAST = false + *doDiff = false + *allErrors = false + *cpuprofile = "" +} + +func runTest(t *testing.T, in, out string) { + resetFlags() info, err := os.Lstat(in) if err != nil { t.Error(err) @@ -159,6 +168,46 @@ func TestRewrite(t *testing.T) { } } +// TestDiff runs gofmt with the -d flag on the input files and checks that the +// expected exit code is set. +func TestDiff(t *testing.T) { + tests := []struct { + in string + exitCode int + }{ + {in: "testdata/exitcode.input", exitCode: 1}, + {in: "testdata/exitcode.golden", exitCode: 0}, + } + + for _, tt := range tests { + resetFlags() + *doDiff = true + + initParserMode() + initRewrite() + + info, err := os.Lstat(tt.in) + if err != nil { + t.Error(err) + return + } + + const maxWeight = 2 << 20 + var buf, errBuf bytes.Buffer + s := newSequencer(maxWeight, &buf, &errBuf) + s.Add(fileWeight(tt.in, info), func(r *reporter) error { + return processFile(tt.in, info, nil, r) + }) + if errBuf.Len() > 0 { + t.Logf("%q", errBuf.Bytes()) + } + + if s.GetExitCode() != tt.exitCode { + t.Errorf("%s: expected exit code %d, got %d", tt.in, tt.exitCode, s.GetExitCode()) + } + } +} + // Test case for issue 3961. func TestCRLF(t *testing.T) { const input = "testdata/crlf.input" // must contain CR/LF's diff --git a/src/cmd/gofmt/testdata/exitcode.golden b/src/cmd/gofmt/testdata/exitcode.golden new file mode 100644 index 00000000000..06ab7d0f9a3 --- /dev/null +++ b/src/cmd/gofmt/testdata/exitcode.golden @@ -0,0 +1 @@ +package main diff --git a/src/cmd/gofmt/testdata/exitcode.input b/src/cmd/gofmt/testdata/exitcode.input new file mode 100644 index 00000000000..4f2f092ce50 --- /dev/null +++ b/src/cmd/gofmt/testdata/exitcode.input @@ -0,0 +1 @@ + package main diff --git a/src/cmd/internal/bootstrap_test/experiment_toolid_test.go b/src/cmd/internal/bootstrap_test/experiment_toolid_test.go index ff2379c8998..ca292b70086 100644 --- a/src/cmd/internal/bootstrap_test/experiment_toolid_test.go +++ b/src/cmd/internal/bootstrap_test/experiment_toolid_test.go @@ -97,7 +97,7 @@ func runCmd(t *testing.T, dir string, env []string, path string, args ...string) cmd.Env = env out, err := cmd.Output() if err != nil { - if ee := (*exec.ExitError)(nil); errors.As(err, &ee) { + if ee, ok := errors.AsType[*exec.ExitError](err); ok { out = append(out, ee.Stderr...) 
} t.Fatalf("%s failed:\n%s\n%s", cmd, out, err) diff --git a/src/cmd/internal/buildid/rewrite.go b/src/cmd/internal/buildid/rewrite.go index 5300f7f9b88..cee40b0116d 100644 --- a/src/cmd/internal/buildid/rewrite.go +++ b/src/cmd/internal/buildid/rewrite.go @@ -213,7 +213,7 @@ func findHostBuildID(r io.Reader) (offset int64, size int64, ok bool) { if cmd.Cmd == imacho.LC_UUID { // The UUID is the data in the LC_UUID load command, // skipping over the 8-byte command header. - return int64(reader.Offset() + 8), int64(cmd.Len - 8), true + return reader.Offset() + 8, int64(cmd.Len - 8), true } } return 0, 0, false diff --git a/src/cmd/internal/cov/readcovdata.go b/src/cmd/internal/cov/readcovdata.go index e0e06344595..f9fd5229307 100644 --- a/src/cmd/internal/cov/readcovdata.go +++ b/src/cmd/internal/cov/readcovdata.go @@ -145,14 +145,14 @@ func (r *CovDataReader) Visit() error { return nil } -func (r *CovDataReader) verb(vlevel int, s string, a ...interface{}) { +func (r *CovDataReader) verb(vlevel int, s string, a ...any) { if r.verbosityLevel >= vlevel { fmt.Fprintf(os.Stderr, s, a...) fmt.Fprintf(os.Stderr, "\n") } } -func (r *CovDataReader) warn(s string, a ...interface{}) { +func (r *CovDataReader) warn(s string, a ...any) { fmt.Fprintf(os.Stderr, "warning: ") fmt.Fprintf(os.Stderr, s, a...) fmt.Fprintf(os.Stderr, "\n") @@ -161,7 +161,7 @@ func (r *CovDataReader) warn(s string, a ...interface{}) { } } -func (r *CovDataReader) fatal(s string, a ...interface{}) error { +func (r *CovDataReader) fatal(s string, a ...any) error { if r.err != nil { return nil } diff --git a/src/cmd/internal/dwarf/dwarf.go b/src/cmd/internal/dwarf/dwarf.go index 6e06f139b03..b8956b4cffe 100644 --- a/src/cmd/internal/dwarf/dwarf.go +++ b/src/cmd/internal/dwarf/dwarf.go @@ -40,8 +40,7 @@ const AbstractFuncSuffix = "$abstract" var logDwarf bool // Sym represents a symbol. -type Sym interface { -} +type Sym any // A Var represents a local variable or a function parameter. type Var struct { @@ -194,16 +193,16 @@ type Context interface { Size(s Sym) int64 AddInt(s Sym, size int, i int64) AddBytes(s Sym, b []byte) - AddAddress(s Sym, t interface{}, ofs int64) - AddCURelativeAddress(s Sym, t interface{}, ofs int64) - AddSectionOffset(s Sym, size int, t interface{}, ofs int64) - AddDWARFAddrSectionOffset(s Sym, t interface{}, ofs int64) - AddIndirectTextRef(s Sym, t interface{}) + AddAddress(s Sym, t any, ofs int64) + AddCURelativeAddress(s Sym, t any, ofs int64) + AddSectionOffset(s Sym, size int, t any, ofs int64) + AddDWARFAddrSectionOffset(s Sym, t any, ofs int64) + AddIndirectTextRef(s Sym, t any) CurrentOffset(s Sym) int64 RecordDclReference(from Sym, to Sym, dclIdx int, inlIndex int) RecordChildDieOffsets(s Sym, vars []*Var, offsets []int32) AddString(s Sym, v string) - Logf(format string, args ...interface{}) + Logf(format string, args ...any) } // AppendUleb128 appends v to b using DWARF's unsigned LEB128 encoding. @@ -874,7 +873,7 @@ type DWAttr struct { Atr uint16 // DW_AT_ Cls uint8 // DW_CLS_ Value int64 - Data interface{} + Data any } // DWDie represents a DWARF debug info entry. @@ -886,7 +885,7 @@ type DWDie struct { Sym Sym } -func putattr(ctxt Context, s Sym, abbrev int, form int, cls int, value int64, data interface{}) error { +func putattr(ctxt Context, s Sym, abbrev int, form int, cls int, value int64, data any) error { switch form { case DW_FORM_addr: // address // Allow nil addresses for DW_AT_go_runtime_type. 
diff --git a/src/cmd/internal/goobj/builtinlist.go b/src/cmd/internal/goobj/builtinlist.go index 9e21544391b..b3320808f11 100644 --- a/src/cmd/internal/goobj/builtinlist.go +++ b/src/cmd/internal/goobj/builtinlist.go @@ -35,11 +35,13 @@ var builtins = [...]struct { {"runtime.goPanicSlice3CU", 1}, {"runtime.goPanicSliceConvert", 1}, {"runtime.printbool", 1}, - {"runtime.printfloat", 1}, + {"runtime.printfloat64", 1}, + {"runtime.printfloat32", 1}, {"runtime.printint", 1}, {"runtime.printhex", 1}, {"runtime.printuint", 1}, - {"runtime.printcomplex", 1}, + {"runtime.printcomplex128", 1}, + {"runtime.printcomplex64", 1}, {"runtime.printstring", 1}, {"runtime.printpointer", 1}, {"runtime.printuintptr", 1}, @@ -109,13 +111,11 @@ var builtins = [...]struct { {"runtime.mapassign_fast64", 1}, {"runtime.mapassign_fast64ptr", 1}, {"runtime.mapassign_faststr", 1}, - {"runtime.mapiterinit", 1}, {"runtime.mapIterStart", 1}, {"runtime.mapdelete", 1}, {"runtime.mapdelete_fast32", 1}, {"runtime.mapdelete_fast64", 1}, {"runtime.mapdelete_faststr", 1}, - {"runtime.mapiternext", 1}, {"runtime.mapIterNext", 1}, {"runtime.mapclear", 1}, {"runtime.makechan64", 1}, diff --git a/src/cmd/internal/goobj/objfile.go b/src/cmd/internal/goobj/objfile.go index 38da67076d5..cca3c840e0e 100644 --- a/src/cmd/internal/goobj/objfile.go +++ b/src/cmd/internal/goobj/objfile.go @@ -727,7 +727,7 @@ func (r *Reader) NNonpkgref() int { // SymOff returns the offset of the i-th symbol. func (r *Reader) SymOff(i uint32) uint32 { - return r.h.Offsets[BlkSymdef] + uint32(i*SymSize) + return r.h.Offsets[BlkSymdef] + i*SymSize } // Sym returns a pointer to the i-th symbol. @@ -752,7 +752,7 @@ func (r *Reader) RefFlags(i int) *RefFlags { // Note: here i is the index of short hashed symbols, not all symbols // (unlike other accessors). func (r *Reader) Hash64(i uint32) uint64 { - off := r.h.Offsets[BlkHash64] + uint32(i*Hash64Size) + off := r.h.Offsets[BlkHash64] + i*Hash64Size return r.uint64At(off) } @@ -760,19 +760,19 @@ func (r *Reader) Hash64(i uint32) uint64 { // Note: here i is the index of hashed symbols, not all symbols // (unlike other accessors). func (r *Reader) Hash(i uint32) *HashType { - off := r.h.Offsets[BlkHash] + uint32(i*HashSize) + off := r.h.Offsets[BlkHash] + i*HashSize return (*HashType)(unsafe.Pointer(&r.b[off])) } // NReloc returns the number of relocations of the i-th symbol. func (r *Reader) NReloc(i uint32) int { - relocIdxOff := r.h.Offsets[BlkRelocIdx] + uint32(i*4) + relocIdxOff := r.h.Offsets[BlkRelocIdx] + i*4 return int(r.uint32At(relocIdxOff+4) - r.uint32At(relocIdxOff)) } // RelocOff returns the offset of the j-th relocation of the i-th symbol. 
func (r *Reader) RelocOff(i uint32, j int) uint32 { - relocIdxOff := r.h.Offsets[BlkRelocIdx] + uint32(i*4) + relocIdxOff := r.h.Offsets[BlkRelocIdx] + i*4 relocIdx := r.uint32At(relocIdxOff) return r.h.Offsets[BlkReloc] + (relocIdx+uint32(j))*uint32(RelocSize) } diff --git a/src/cmd/internal/macho/macho.go b/src/cmd/internal/macho/macho.go index ad29c32c50a..6c9907dbb48 100644 --- a/src/cmd/internal/macho/macho.go +++ b/src/cmd/internal/macho/macho.go @@ -100,7 +100,7 @@ func (r *LoadCmdReader) Next() (LoadCmd, error) { return cmd, nil } -func (r LoadCmdReader) ReadAt(offset int64, data interface{}) error { +func (r LoadCmdReader) ReadAt(offset int64, data any) error { if _, err := r.f.Seek(r.offset+offset, 0); err != nil { return err } @@ -117,7 +117,7 @@ func NewLoadCmdUpdater(f io.ReadWriteSeeker, order binary.ByteOrder, nextOffset return LoadCmdUpdater{NewLoadCmdReader(f, order, nextOffset)} } -func (u LoadCmdUpdater) WriteAt(offset int64, data interface{}) error { +func (u LoadCmdUpdater) WriteAt(offset int64, data any) error { if _, err := u.f.Seek(u.offset+offset, 0); err != nil { return err } diff --git a/src/cmd/internal/obj/arm/a.out.go b/src/cmd/internal/obj/arm/a.out.go index fabd0cb50f4..d33b75120d2 100644 --- a/src/cmd/internal/obj/arm/a.out.go +++ b/src/cmd/internal/obj/arm/a.out.go @@ -115,7 +115,7 @@ var ARMDWARFRegisters = map[int16]int16{} func init() { // f assigns dwarfregisters[from:to] = (base):(step*(to-from)+base) f := func(from, to, base, step int16) { - for r := int16(from); r <= to; r++ { + for r := from; r <= to; r++ { ARMDWARFRegisters[r] = step*(r-from) + base } } diff --git a/src/cmd/internal/obj/arm/asm5.go b/src/cmd/internal/obj/arm/asm5.go index 0ef13b81f6f..1e2891de0a7 100644 --- a/src/cmd/internal/obj/arm/asm5.go +++ b/src/cmd/internal/obj/arm/asm5.go @@ -579,7 +579,7 @@ func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { } if int64(pc) > p.Pc { - ctxt.Diag("PC padding invalid: want %#d, has %#d: %v", p.Pc, pc, p) + ctxt.Diag("PC padding invalid: want %d, has %d: %v", p.Pc, pc, p) } for int64(pc) != p.Pc { // emit 0xe1a00000 (MOVW R0, R0) diff --git a/src/cmd/internal/obj/arm64/a.out.go b/src/cmd/internal/obj/arm64/a.out.go index 710dd64b304..814dba2c100 100644 --- a/src/cmd/internal/obj/arm64/a.out.go +++ b/src/cmd/internal/obj/arm64/a.out.go @@ -1020,6 +1020,12 @@ const ( AWORD AYIELD ABTI + APACIASP + AAUTIASP + APACIBSP + AAUTIBSP + AAUTIA1716 + AAUTIB1716 ALAST AB = obj.AJMP ABL = obj.ACALL diff --git a/src/cmd/internal/obj/arm64/anames.go b/src/cmd/internal/obj/arm64/anames.go index 379f53bab37..497429d9985 100644 --- a/src/cmd/internal/obj/arm64/anames.go +++ b/src/cmd/internal/obj/arm64/anames.go @@ -537,5 +537,11 @@ var Anames = []string{ "WORD", "YIELD", "BTI", + "PACIASP", + "AUTIASP", + "PACIBSP", + "AUTIBSP", + "AUTIA1716", + "AUTIB1716", "LAST", } diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go index 743d09a3190..ccf8eda495a 100644 --- a/src/cmd/internal/obj/arm64/asm7.go +++ b/src/cmd/internal/obj/arm64/asm7.go @@ -34,9 +34,11 @@ import ( "cmd/internal/obj" "cmd/internal/objabi" "encoding/binary" + "errors" "fmt" "log" "math" + "math/bits" "slices" "strings" ) @@ -1162,7 +1164,7 @@ func span7(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { switch p.As { case obj.APCALIGN, obj.APCALIGNMAX: v := obj.AlignmentPaddingLength(int32(p.Pc), p, c.ctxt) - for i := 0; i < int(v/4); i++ { + for i := 0; i < v/4; i++ { // emit ANOOP instruction by the padding size buf.emit(OP_NOOP) } @@ 
-1975,7 +1977,18 @@ func (c *ctxt7) con64class(a *obj.Addr) int { return C_MOVCON } else if zeroCount == 2 || negCount == 2 { return C_MOVCON2 - } else if zeroCount == 1 || negCount == 1 { + } + // See omovlconst for description of this loop. + for i := 0; i < 4; i++ { + mask := uint64(0xffff) << (i * 16) + for period := 2; period <= 32; period *= 2 { + x := uint64(a.Offset)&^mask | bits.RotateLeft64(uint64(a.Offset), max(period, 16))&mask + if isbitcon(x) { + return C_MOVCON2 + } + } + } + if zeroCount == 1 || negCount == 1 { return C_MOVCON3 } else { return C_VCON @@ -3017,6 +3030,13 @@ func buildop(ctxt *obj.Link) { oprangeset(ANOOP, t) oprangeset(ADRPS, t) + oprangeset(APACIASP, t) + oprangeset(AAUTIASP, t) + oprangeset(APACIBSP, t) + oprangeset(AAUTIBSP, t) + oprangeset(AAUTIA1716, t) + oprangeset(AAUTIB1716, t) + case ACBZ: oprangeset(ACBZW, t) oprangeset(ACBNZ, t) @@ -4009,7 +4029,7 @@ func (c *ctxt7) asmout(p *obj.Prog, out []uint32) (count int) { // Handle smaller unaligned and negative offsets via addition or subtraction. if v >= -4095 && v <= 4095 { - o1 = c.oaddi12(p, v, REGTMP, int16(rt)) + o1 = c.oaddi12(p, v, REGTMP, rt) o2 = c.olsr12u(p, c.opstr(p, p.As), 0, REGTMP, rf) break } @@ -4065,7 +4085,7 @@ func (c *ctxt7) asmout(p *obj.Prog, out []uint32) (count int) { // Handle smaller unaligned and negative offsets via addition or subtraction. if v >= -4095 && v <= 4095 { - o1 = c.oaddi12(p, v, REGTMP, int16(rf)) + o1 = c.oaddi12(p, v, REGTMP, rf) o2 = c.olsr12u(p, c.opldr(p, p.As), 0, REGTMP, rt) break } @@ -4354,7 +4374,7 @@ func (c *ctxt7) asmout(p *obj.Prog, out []uint32) (count int) { // remove the NOTUSETMP flag in optab. op := c.opirr(p, p.As) if op&Sbit != 0 { - c.ctxt.Diag("can not break addition/subtraction when S bit is set", p) + c.ctxt.Diag("can not break addition/subtraction when S bit is set (%v)", p) } rt, r := p.To.Reg, p.Reg if r == obj.REG_NONE { @@ -4844,7 +4864,7 @@ func (c *ctxt7) asmout(p *obj.Prog, out []uint32) (count int) { if p.Pool != nil { c.ctxt.Diag("%v: unused constant in pool (%v)\n", p, v) } - o1 = c.oaddi(p, AADD, lo, REGTMP, int16(rf)) + o1 = c.oaddi(p, AADD, lo, REGTMP, rf) o2 = c.oaddi(p, AADD, hi, REGTMP, REGTMP) o3 = c.opldpstp(p, o, 0, REGTMP, rt1, rt2, 1) break @@ -4909,7 +4929,7 @@ func (c *ctxt7) asmout(p *obj.Prog, out []uint32) (count int) { if p.Pool != nil { c.ctxt.Diag("%v: unused constant in pool (%v)\n", p, v) } - o1 = c.oaddi(p, AADD, lo, REGTMP, int16(rt)) + o1 = c.oaddi(p, AADD, lo, REGTMP, rt) o2 = c.oaddi(p, AADD, hi, REGTMP, REGTMP) o3 = c.opldpstp(p, o, 0, REGTMP, rf1, rf2, 0) break @@ -5285,7 +5305,7 @@ func (c *ctxt7) asmout(p *obj.Prog, out []uint32) (count int) { } o1 = c.opirr(p, p.As) - o1 |= (uint32(r&31) << 5) | (uint32((imm>>3)&0xfff) << 10) | (uint32(v & 31)) + o1 |= (uint32(r&31) << 5) | ((imm >> 3) & 0xfff << 10) | (v & 31) case 92: /* vmov Vn.[index], Vd.[index] */ rf := int(p.From.Reg) @@ -5838,7 +5858,7 @@ func (c *ctxt7) asmout(p *obj.Prog, out []uint32) (count int) { out[3] = o4 out[4] = o5 - return int(o.size(c.ctxt, p) / 4) + return o.size(c.ctxt, p) / 4 } func (c *ctxt7) addrRelocType(p *obj.Prog) objabi.RelocType { @@ -7016,6 +7036,24 @@ func (c *ctxt7) op0(p *obj.Prog, a obj.As) uint32 { case ASEVL: return SYSHINT(5) + + case APACIASP: + return SYSHINT(25) + + case AAUTIASP: + return SYSHINT(29) + + case APACIBSP: + return SYSHINT(27) + + case AAUTIBSP: + return SYSHINT(31) + + case AAUTIA1716: + return SYSHINT(12) + + case AAUTIB1716: + return SYSHINT(14) } c.ctxt.Diag("%v: bad op0 %v", p, a) @@ 
-7250,6 +7288,8 @@ func (c *ctxt7) opldrr(p *obj.Prog, a obj.As, rt, rn, rm int16, extension bool) op = OptionS<<10 | 0x3<<21 | 0x17<<27 | 1<<26 case AFMOVD: op = OptionS<<10 | 0x3<<21 | 0x1f<<27 | 1<<26 + case AFMOVQ: + op = OptionS<<10 | 0x7<<21 | 0x07<<27 | 1<<26 default: c.ctxt.Diag("bad opldrr %v\n%v", a, p) return 0 @@ -7282,6 +7322,8 @@ func (c *ctxt7) opstrr(p *obj.Prog, a obj.As, rt, rn, rm int16, extension bool) op = OptionS<<10 | 0x1<<21 | 0x17<<27 | 1<<26 case AFMOVD: op = OptionS<<10 | 0x1<<21 | 0x1f<<27 | 1<<26 + case AFMOVQ: + op = OptionS<<10 | 0x5<<21 | 0x07<<27 | 1<<26 default: c.ctxt.Diag("bad opstrr %v\n%v", a, p) return 0 @@ -7525,6 +7567,31 @@ func (c *ctxt7) omovlconst(as obj.As, p *obj.Prog, a *obj.Addr, rt int, os []uin } } return 2 + } + + // Look for a two-instruction pair: a bit pattern encodable + // as a bitcon immediate, plus a fixup MOVK instruction. + // Constants like this often occur from strength reduction of divides. + for i = 0; i < 4; i++ { + mask := uint64(0xffff) << (i * 16) + for period := 2; period <= 32; period *= 2 { // TODO: handle period==64 somehow? + // Copy in bits from outside of the masked region + x := uint64(d)&^mask | bits.RotateLeft64(uint64(d), max(period, 16))&mask + if isbitcon(x) { + // ORR $c1, ZR, rt + os[0] = c.opirr(p, AORR) + os[0] |= bitconEncode(x, 64) | uint32(REGZERO&31)<<5 | uint32(rt&31) + // MOVK $c2<<(i*16), rt + os[1] = c.opirr(p, AMOVK) + os[1] |= MOVCONST(d, i, rt) + return 2 + } + } + } + // TODO: other fixups, like ADD or SUB? + // TODO: 3-instruction variant, instead of the full MOVD+3*MOVK version below? + + switch { case zeroCount == 1: // one MOVZ and two MOVKs @@ -7828,5 +7895,148 @@ func (c *ctxt7) encRegShiftOrExt(p *obj.Prog, a *obj.Addr, r int16) uint32 { // pack returns the encoding of the "Q" field and two arrangement specifiers. func pack(q uint32, arngA, arngB uint8) uint32 { - return uint32(q)<<16 | uint32(arngA)<<8 | uint32(arngB) + return q<<16 | uint32(arngA)<<8 | uint32(arngB) +} + +// ARM64RegisterExtension constructs an ARM64 register with extension or arrangement. +func ARM64RegisterExtension(a *obj.Addr, ext string, reg, num int16, isAmount, isIndex bool) error { + Rnum := (reg & 31) + num<<5 + if isAmount { + if num < 0 || num > 7 { + return errors.New("index shift amount is out of range") + } + } + if reg <= REG_R31 && reg >= REG_R0 { + if !isAmount { + return errors.New("invalid register extension") + } + switch ext { + case "UXTB": + if a.Type == obj.TYPE_MEM { + return errors.New("invalid shift for the register offset addressing mode") + } + a.Reg = REG_UXTB + Rnum + case "UXTH": + if a.Type == obj.TYPE_MEM { + return errors.New("invalid shift for the register offset addressing mode") + } + a.Reg = REG_UXTH + Rnum + case "UXTW": + // The effective address of a memory operand is a base register value plus an offset register value.
+ if a.Type == obj.TYPE_MEM { + a.Index = REG_UXTW + Rnum + } else { + a.Reg = REG_UXTW + Rnum + } + case "UXTX": + if a.Type == obj.TYPE_MEM { + return errors.New("invalid shift for the register offset addressing mode") + } + a.Reg = REG_UXTX + Rnum + case "SXTB": + if a.Type == obj.TYPE_MEM { + return errors.New("invalid shift for the register offset addressing mode") + } + a.Reg = REG_SXTB + Rnum + case "SXTH": + if a.Type == obj.TYPE_MEM { + return errors.New("invalid shift for the register offset addressing mode") + } + a.Reg = REG_SXTH + Rnum + case "SXTW": + if a.Type == obj.TYPE_MEM { + a.Index = REG_SXTW + Rnum + } else { + a.Reg = REG_SXTW + Rnum + } + case "SXTX": + if a.Type == obj.TYPE_MEM { + a.Index = REG_SXTX + Rnum + } else { + a.Reg = REG_SXTX + Rnum + } + case "LSL": + a.Index = REG_LSL + Rnum + default: + return errors.New("unsupported general register extension type: " + ext) + + } + } else if reg <= REG_V31 && reg >= REG_V0 { + switch ext { + case "B8": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = REG_ARNG + (reg & 31) + ((ARNG_8B & 15) << 5) + case "B16": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = REG_ARNG + (reg & 31) + ((ARNG_16B & 15) << 5) + case "H4": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = REG_ARNG + (reg & 31) + ((ARNG_4H & 15) << 5) + case "H8": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = REG_ARNG + (reg & 31) + ((ARNG_8H & 15) << 5) + case "S2": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = REG_ARNG + (reg & 31) + ((ARNG_2S & 15) << 5) + case "S4": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = REG_ARNG + (reg & 31) + ((ARNG_4S & 15) << 5) + case "D1": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = REG_ARNG + (reg & 31) + ((ARNG_1D & 15) << 5) + case "D2": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = REG_ARNG + (reg & 31) + ((ARNG_2D & 15) << 5) + case "Q1": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = REG_ARNG + (reg & 31) + ((ARNG_1Q & 15) << 5) + case "B": + if !isIndex { + return nil + } + a.Reg = REG_ELEM + (reg & 31) + ((ARNG_B & 15) << 5) + a.Index = num + case "H": + if !isIndex { + return nil + } + a.Reg = REG_ELEM + (reg & 31) + ((ARNG_H & 15) << 5) + a.Index = num + case "S": + if !isIndex { + return nil + } + a.Reg = REG_ELEM + (reg & 31) + ((ARNG_S & 15) << 5) + a.Index = num + case "D": + if !isIndex { + return nil + } + a.Reg = REG_ELEM + (reg & 31) + ((ARNG_D & 15) << 5) + a.Index = num + default: + return errors.New("unsupported simd register extension type: " + ext) + } + } else { + return errors.New("invalid register and extension combination") + } + return nil } diff --git a/src/cmd/internal/obj/arm64/asm_arm64_test.go b/src/cmd/internal/obj/arm64/asm_arm64_test.go index 83d137a0846..b83db60b40f 100644 --- a/src/cmd/internal/obj/arm64/asm_arm64_test.go +++ b/src/cmd/internal/obj/arm64/asm_arm64_test.go @@ -38,3 +38,16 @@ func TestMOVK(t *testing.T) { t.Errorf("Got %x want %x\n", x, want) } } + +func testCombined() (a uint64, b uint64) +func TestCombined(t *testing.T) { + got1, got2 := testCombined() + want1 := uint64(0xaaaaaaaaaaaaaaab) + want2 := uint64(0x0ff019940ff00ff0) + if got1 != want1 { + t.Errorf("First result, got %x want %x", got1, want1) + } + if got2 != want2 { + t.Errorf("Second result, got %x want %x", got2, want2) + } +}
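Aside (not part of the patch): a minimal standalone sketch of the decomposition the new two-instruction path finds for the first test constant above. It assumes only the standard library; the real encoder consults isbitcon over several rotation periods, which is approximated here by checking directly for the period-2 pattern that $0xaaaaaaaaaaaaaaab reduces to, using only the rotate-by-16 probe.

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	d := uint64(0xaaaaaaaaaaaaaaab)
	for i := 0; i < 4; i++ {
		// Splice a rotated copy of d over 16-bit chunk i, as omovlconst does.
		mask := uint64(0xffff) << (i * 16)
		x := d&^mask | bits.RotateLeft64(d, 16)&mask
		if x == 0xaaaaaaaaaaaaaaaa { // stand-in for isbitcon(x)
			// The bitcon loads the repeating pattern; MOVK patches chunk i.
			fmt.Printf("ORR $%#x, ZR, Rt\n", x)
			fmt.Printf("MOVK $(%#x<<%d), Rt\n", (d>>(16*i))&0xffff, 16*i)
			return
		}
	}
}

For this constant the probe succeeds at i = 0, printing ORR $0xaaaaaaaaaaaaaaaa, ZR, Rt followed by MOVK $(0xaaab<<0), Rt, the two-instruction form that the new con64class code classifies as C_MOVCON2.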
diff --git a/src/cmd/internal/obj/arm64/asm_arm64_test.s b/src/cmd/internal/obj/arm64/asm_arm64_test.s index e3fda57775f..65d80d1380f 100644 --- a/src/cmd/internal/obj/arm64/asm_arm64_test.s +++ b/src/cmd/internal/obj/arm64/asm_arm64_test.s @@ -37,3 +37,11 @@ TEXT ·testmovk(SB), NOSPLIT, $0-8 MOVK $(40000<<48), R0 MOVD R0, ret+0(FP) RET + +// testCombined() (uint64, uint64) +TEXT ·testCombined(SB), NOSPLIT, $0-16 + MOVD $0xaaaaaaaaaaaaaaab, R0 + MOVD $0x0ff019940ff00ff0, R1 + MOVD R0, a+0(FP) + MOVD R1, b+8(FP) + RET diff --git a/src/cmd/internal/obj/dwarf.go b/src/cmd/internal/obj/dwarf.go index c6f321e3e53..d09aa985f04 100644 --- a/src/cmd/internal/obj/dwarf.go +++ b/src/cmd/internal/obj/dwarf.go @@ -219,7 +219,7 @@ func (c dwCtxt) AddUint16(s dwarf.Sym, i uint16) { c.AddInt(s, 2, int64(i)) } func (c dwCtxt) AddUint8(s dwarf.Sym, i uint8) { - b := []byte{byte(i)} + b := []byte{i} c.AddBytes(s, b) } func (c dwCtxt) AddBytes(s dwarf.Sym, b []byte) { @@ -231,7 +231,7 @@ func (c dwCtxt) AddString(s dwarf.Sym, v string) { ls.WriteString(c.Link, ls.Size, len(v), v) ls.WriteInt(c.Link, ls.Size, 1, 0) } -func (c dwCtxt) AddAddress(s dwarf.Sym, data interface{}, value int64) { +func (c dwCtxt) AddAddress(s dwarf.Sym, data any, value int64) { ls := s.(*LSym) size := c.PtrSize() if data != nil { @@ -241,15 +241,15 @@ func (c dwCtxt) AddAddress(s dwarf.Sym, data interface{}, value int64) { ls.WriteInt(c.Link, ls.Size, size, value) } } -func (c dwCtxt) AddCURelativeAddress(s dwarf.Sym, data interface{}, value int64) { +func (c dwCtxt) AddCURelativeAddress(s dwarf.Sym, data any, value int64) { ls := s.(*LSym) rsym := data.(*LSym) ls.WriteCURelativeAddr(c.Link, ls.Size, rsym, value) } -func (c dwCtxt) AddSectionOffset(s dwarf.Sym, size int, t interface{}, ofs int64) { +func (c dwCtxt) AddSectionOffset(s dwarf.Sym, size int, t any, ofs int64) { panic("should be used only in the linker") } -func (c dwCtxt) AddDWARFAddrSectionOffset(s dwarf.Sym, t interface{}, ofs int64) { +func (c dwCtxt) AddDWARFAddrSectionOffset(s dwarf.Sym, t any, ofs int64) { size := 4 if isDwarf64(c.Link) { size = 8 @@ -284,11 +284,11 @@ func (c dwCtxt) RecordChildDieOffsets(s dwarf.Sym, vars []*dwarf.Var, offsets [] c.Link.DwFixups.RegisterChildDIEOffsets(ls, vars, offsets) } -func (c dwCtxt) Logf(format string, args ...interface{}) { +func (c dwCtxt) Logf(format string, args ...any) { c.Link.Logf(format, args...) 
} -func (c dwCtxt) AddIndirectTextRef(s dwarf.Sym, t interface{}) { +func (c dwCtxt) AddIndirectTextRef(s dwarf.Sym, t any) { ls := s.(*LSym) tsym := t.(*LSym) // Note the doubling below -- DwTextCount is an estimate and diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go index 816fed026f3..85dca33d277 100644 --- a/src/cmd/internal/obj/link.go +++ b/src/cmd/internal/obj/link.go @@ -211,7 +211,7 @@ type Addr struct { // for TYPE_FCONST, a float64 // for TYPE_BRANCH, a *Prog (optional) // for TYPE_TEXTSIZE, an int32 (optional) - Val interface{} + Val any } type AddrName int8 @@ -464,7 +464,7 @@ type LSym struct { P []byte R []Reloc - Extra *interface{} // *FuncInfo, *VarInfo, *FileInfo, *TypeInfo, or *ItabInfo, if present + Extra *any // *FuncInfo, *VarInfo, *FileInfo, *TypeInfo, or *ItabInfo, if present Pkg string PkgIdx int32 @@ -523,7 +523,7 @@ func (s *LSym) NewFuncInfo() *FuncInfo { panic(fmt.Sprintf("invalid use of LSym - NewFuncInfo with Extra of type %T", *s.Extra)) } f := new(FuncInfo) - s.Extra = new(interface{}) + s.Extra = new(any) *s.Extra = f return f } @@ -547,7 +547,7 @@ func (s *LSym) NewVarInfo() *VarInfo { panic(fmt.Sprintf("invalid use of LSym - NewVarInfo with Extra of type %T", *s.Extra)) } f := new(VarInfo) - s.Extra = new(interface{}) + s.Extra = new(any) *s.Extra = f return f } @@ -574,7 +574,7 @@ func (s *LSym) NewFileInfo() *FileInfo { panic(fmt.Sprintf("invalid use of LSym - NewFileInfo with Extra of type %T", *s.Extra)) } f := new(FileInfo) - s.Extra = new(interface{}) + s.Extra = new(any) *s.Extra = f return f } @@ -591,7 +591,7 @@ func (s *LSym) File() *FileInfo { // A TypeInfo contains information for a symbol // that contains a runtime._type. type TypeInfo struct { - Type interface{} // a *cmd/compile/internal/types.Type + Type any // a *cmd/compile/internal/types.Type } func (s *LSym) NewTypeInfo() *TypeInfo { @@ -599,7 +599,7 @@ func (s *LSym) NewTypeInfo() *TypeInfo { panic(fmt.Sprintf("invalid use of LSym - NewTypeInfo with Extra of type %T", *s.Extra)) } t := new(TypeInfo) - s.Extra = new(interface{}) + s.Extra = new(any) *s.Extra = t return t } @@ -616,7 +616,7 @@ func (s *LSym) TypeInfo() *TypeInfo { // An ItabInfo contains information for a symbol // that contains a runtime.itab. 
type ItabInfo struct { - Type interface{} // a *cmd/compile/internal/types.Type + Type any // a *cmd/compile/internal/types.Type } func (s *LSym) NewItabInfo() *ItabInfo { @@ -624,7 +624,7 @@ func (s *LSym) NewItabInfo() *ItabInfo { panic(fmt.Sprintf("invalid use of LSym - NewItabInfo with Extra of type %T", *s.Extra)) } t := new(ItabInfo) - s.Extra = new(interface{}) + s.Extra = new(any) *s.Extra = t return t } @@ -753,12 +753,12 @@ func (ft *WasmFuncType) Read(b []byte) { ft.Params = make([]WasmField, readUint32()) for i := range ft.Params { ft.Params[i].Type = WasmFieldType(readByte()) - ft.Params[i].Offset = int64(readInt64()) + ft.Params[i].Offset = readInt64() } ft.Results = make([]WasmField, readUint32()) for i := range ft.Results { ft.Results[i].Type = WasmFieldType(readByte()) - ft.Results[i].Offset = int64(readInt64()) + ft.Results[i].Offset = readInt64() } } @@ -1178,7 +1178,7 @@ type Link struct { DwFixups *DwarfFixupTable DwTextCount int Imports []goobj.ImportedPkg - DiagFunc func(string, ...interface{}) + DiagFunc func(string, ...any) DiagFlush func() DebugInfo func(ctxt *Link, fn *LSym, info *LSym, curfn Func) ([]dwarf.Scope, dwarf.InlCalls) GenAbstractFunc func(fn *LSym) @@ -1216,12 +1216,19 @@ type Link struct { Fingerprint goobj.FingerprintType // fingerprint of symbol indices, to catch index mismatch } -func (ctxt *Link) Diag(format string, args ...interface{}) { +// Assert to vet's printf checker that Link.DiagFunc is printf-like. +func _(ctxt *Link) { + ctxt.DiagFunc = func(format string, args ...any) { + _ = fmt.Sprintf(format, args...) + } +} + +func (ctxt *Link) Diag(format string, args ...any) { ctxt.Errors++ ctxt.DiagFunc(format, args...) } -func (ctxt *Link) Logf(format string, args ...interface{}) { +func (ctxt *Link) Logf(format string, args ...any) { fmt.Fprintf(ctxt.Bso, format, args...)
ctxt.Bso.Flush() } diff --git a/src/cmd/internal/obj/loong64/a.out.go b/src/cmd/internal/obj/loong64/a.out.go index 100e99b1c4f..73f145df146 100644 --- a/src/cmd/internal/obj/loong64/a.out.go +++ b/src/cmd/internal/obj/loong64/a.out.go @@ -236,7 +236,7 @@ var LOONG64DWARFRegisters = map[int16]int16{} func init() { // f assigns dwarfregisters[from:to] = (base):(to-from+base) f := func(from, to, base int16) { - for r := int16(from); r <= to; r++ { + for r := from; r <= to; r++ { LOONG64DWARFRegisters[r] = (r - from) + base } } @@ -249,7 +249,13 @@ func init() { } const ( - BIG = 2046 + BIG_8 = 128 - 2 // FIXME (not sure if -2 is appropriate) + BIG_9 = 256 - 2 + BIG_10 = 512 - 2 + BIG_11 = 1024 - 2 + BIG_12 = 2046 + BIG_16 = 32768 - 2 + BIG_32 = 2147483648 - 2 ) const ( @@ -397,10 +403,16 @@ const ( C_BRAN C_SAUTO C_LAUTO - C_ZOREG - C_SOREG - C_LOREG - C_ROFF // register offset + C_ZOREG // An $0+reg memory op + C_SOREG_8 // An $n+reg memory arg where n is an 8-bit signed offset + C_SOREG_9 // An $n+reg memory arg where n is a 9-bit signed offset + C_SOREG_10 // An $n+reg memory arg where n is a 10-bit signed offset + C_SOREG_11 // An $n+reg memory arg where n is an 11-bit signed offset + C_SOREG_12 // An $n+reg memory arg where n is a 12-bit signed offset + C_SOREG_16 // An $n+reg memory arg where n is a 16-bit signed offset + C_LOREG_32 // An $n+reg memory arg where n is a 32-bit signed offset + C_LOREG_64 // An $n+reg memory arg where n is a 64-bit signed offset + C_ROFF // register offset C_ADDR C_TLS_LE C_TLS_IE @@ -797,6 +809,38 @@ const ( AXVSUBHU AXVSUBWU AXVSUBVU + AVSADDB + AVSADDH + AVSADDW + AVSADDV + AVSSUBB + AVSSUBH + AVSSUBW + AVSSUBV + AVSADDBU + AVSADDHU + AVSADDWU + AVSADDVU + AVSSUBBU + AVSSUBHU + AVSSUBWU + AVSSUBVU + AXVSADDB + AXVSADDH + AXVSADDW + AXVSADDV + AXVSSUBB + AXVSSUBH + AXVSSUBW + AXVSSUBV + AXVSADDBU + AXVSADDHU + AXVSADDWU + AXVSADDVU + AXVSSUBBU + AXVSSUBHU + AXVSSUBWU + AXVSSUBVU // LSX and LASX Bit-manipulation Instructions AVANDB @@ -1103,6 +1147,29 @@ const ( AXVSHUF4IW AXVSHUF4IV + AVSHUFB + AVSHUFH + AVSHUFW + AVSHUFV + AXVSHUFB + AXVSHUFH + AXVSHUFW + AXVSHUFV + + AVPERMIW + AXVPERMIW + AXVPERMIV + AXVPERMIQ + + AVEXTRINSB + AVEXTRINSH + AVEXTRINSW + AVEXTRINSV + AXVEXTRINSB + AXVEXTRINSH + AXVEXTRINSW + AXVEXTRINSV + AVSETEQV AVSETNEV AVSETANYEQB diff --git a/src/cmd/internal/obj/loong64/anames.go b/src/cmd/internal/obj/loong64/anames.go index 422ccbd9b0b..ab85c52a21b 100644 --- a/src/cmd/internal/obj/loong64/anames.go +++ b/src/cmd/internal/obj/loong64/anames.go @@ -306,6 +306,38 @@ var Anames = []string{ "XVSUBHU", "XVSUBWU", "XVSUBVU", + "VSADDB", + "VSADDH", + "VSADDW", + "VSADDV", + "VSSUBB", + "VSSUBH", + "VSSUBW", + "VSSUBV", + "VSADDBU", + "VSADDHU", + "VSADDWU", + "VSADDVU", + "VSSUBBU", + "VSSUBHU", + "VSSUBWU", + "VSSUBVU", + "XVSADDB", + "XVSADDH", + "XVSADDW", + "XVSADDV", + "XVSSUBB", + "XVSSUBH", + "XVSSUBW", + "XVSSUBV", + "XVSADDBU", + "XVSADDHU", + "XVSADDWU", + "XVSADDVU", + "XVSSUBBU", + "XVSSUBHU", + "XVSSUBWU", + "XVSSUBVU", "VANDB", "VORB", "VXORB", @@ -586,6 +618,26 @@ var Anames = []string{ "XVSHUF4IH", "XVSHUF4IW", "XVSHUF4IV", + "VSHUFB", + "VSHUFH", + "VSHUFW", + "VSHUFV", + "XVSHUFB", + "XVSHUFH", + "XVSHUFW", + "XVSHUFV", + "VPERMIW", + "XVPERMIW", + "XVPERMIV", + "XVPERMIQ", + "VEXTRINSB", + "VEXTRINSH", + "VEXTRINSW", + "VEXTRINSV", + "XVEXTRINSB", + "XVEXTRINSH", + "XVEXTRINSW", + "XVEXTRINSV", "VSETEQV", "VSETNEV", "VSETANYEQB", diff --git a/src/cmd/internal/obj/loong64/asm.go b/src/cmd/internal/obj/loong64/asm.go
index ca6e2be4aa9..38b075d77e7 100644 --- a/src/cmd/internal/obj/loong64/asm.go +++ b/src/cmd/internal/obj/loong64/asm.go @@ -58,6 +58,8 @@ var optab = []Optab{ {AMOVW, C_REG, C_NONE, C_NONE, C_REG, C_NONE, 1, 4, 0, 0}, {AMOVV, C_REG, C_NONE, C_NONE, C_REG, C_NONE, 1, 4, 0, 0}, + {AVMOVQ, C_VREG, C_NONE, C_NONE, C_VREG, C_NONE, 1, 4, 0, 0}, + {AXVMOVQ, C_XREG, C_NONE, C_NONE, C_XREG, C_NONE, 1, 4, 0, 0}, {AMOVB, C_REG, C_NONE, C_NONE, C_REG, C_NONE, 12, 4, 0, 0}, {AMOVBU, C_REG, C_NONE, C_NONE, C_REG, C_NONE, 12, 4, 0, 0}, {AMOVWU, C_REG, C_NONE, C_NONE, C_REG, C_NONE, 12, 4, 0, 0}, @@ -153,6 +155,8 @@ var optab = []Optab{ {AFMADDF, C_FREG, C_FREG, C_NONE, C_FREG, C_NONE, 37, 4, 0, 0}, {AFMADDF, C_FREG, C_FREG, C_FREG, C_FREG, C_NONE, 37, 4, 0, 0}, + {AVSHUFB, C_VREG, C_VREG, C_VREG, C_VREG, C_NONE, 37, 4, 0, 0}, + {AXVSHUFB, C_XREG, C_XREG, C_XREG, C_XREG, C_NONE, 37, 4, 0, 0}, {AFSEL, C_FCCREG, C_FREG, C_FREG, C_FREG, C_NONE, 33, 4, 0, 0}, {AFSEL, C_FCCREG, C_FREG, C_NONE, C_FREG, C_NONE, 33, 4, 0, 0}, @@ -162,46 +166,41 @@ var optab = []Optab{ {AMOVV, C_REG, C_NONE, C_NONE, C_SAUTO, C_NONE, 7, 4, REGSP, 0}, {AMOVB, C_REG, C_NONE, C_NONE, C_SAUTO, C_NONE, 7, 4, REGSP, 0}, {AMOVBU, C_REG, C_NONE, C_NONE, C_SAUTO, C_NONE, 7, 4, REGSP, 0}, - {AMOVW, C_REG, C_NONE, C_NONE, C_SOREG, C_NONE, 7, 4, REGZERO, 0}, - {AMOVWU, C_REG, C_NONE, C_NONE, C_SOREG, C_NONE, 7, 4, REGZERO, 0}, - {AMOVV, C_REG, C_NONE, C_NONE, C_SOREG, C_NONE, 7, 4, REGZERO, 0}, - {AMOVB, C_REG, C_NONE, C_NONE, C_SOREG, C_NONE, 7, 4, REGZERO, 0}, - {AMOVBU, C_REG, C_NONE, C_NONE, C_SOREG, C_NONE, 7, 4, REGZERO, 0}, - {AVMOVQ, C_VREG, C_NONE, C_NONE, C_SOREG, C_NONE, 7, 4, REGZERO, 0}, - {AXVMOVQ, C_XREG, C_NONE, C_NONE, C_SOREG, C_NONE, 7, 4, REGZERO, 0}, + {AMOVW, C_REG, C_NONE, C_NONE, C_SOREG_12, C_NONE, 7, 4, REGZERO, 0}, + {AMOVWU, C_REG, C_NONE, C_NONE, C_SOREG_12, C_NONE, 7, 4, REGZERO, 0}, + {AMOVV, C_REG, C_NONE, C_NONE, C_SOREG_12, C_NONE, 7, 4, REGZERO, 0}, + {AMOVB, C_REG, C_NONE, C_NONE, C_SOREG_12, C_NONE, 7, 4, REGZERO, 0}, + {AMOVBU, C_REG, C_NONE, C_NONE, C_SOREG_12, C_NONE, 7, 4, REGZERO, 0}, + {AVMOVQ, C_VREG, C_NONE, C_NONE, C_SOREG_12, C_NONE, 7, 4, REGZERO, 0}, + {AXVMOVQ, C_XREG, C_NONE, C_NONE, C_SOREG_12, C_NONE, 7, 4, REGZERO, 0}, {AVMOVQ, C_VREG, C_NONE, C_NONE, C_SAUTO, C_NONE, 7, 4, REGZERO, 0}, {AXVMOVQ, C_XREG, C_NONE, C_NONE, C_SAUTO, C_NONE, 7, 4, REGZERO, 0}, - {ASC, C_REG, C_NONE, C_NONE, C_SOREG, C_NONE, 7, 4, REGZERO, 0}, - {ASCV, C_REG, C_NONE, C_NONE, C_SOREG, C_NONE, 7, 4, REGZERO, 0}, {AMOVW, C_SAUTO, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGSP, 0}, {AMOVWU, C_SAUTO, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGSP, 0}, {AMOVV, C_SAUTO, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGSP, 0}, {AMOVB, C_SAUTO, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGSP, 0}, {AMOVBU, C_SAUTO, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGSP, 0}, - {AMOVW, C_SOREG, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGZERO, 0}, - {AMOVWU, C_SOREG, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGZERO, 0}, - {AMOVV, C_SOREG, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGZERO, 0}, - {AMOVB, C_SOREG, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGZERO, 0}, - {AMOVBU, C_SOREG, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGZERO, 0}, - {AVMOVQ, C_SOREG, C_NONE, C_NONE, C_VREG, C_NONE, 8, 4, REGZERO, 0}, - {AXVMOVQ, C_SOREG, C_NONE, C_NONE, C_XREG, C_NONE, 8, 4, REGZERO, 0}, + {AMOVW, C_SOREG_12, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGZERO, 0}, + {AMOVWU, C_SOREG_12, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGZERO, 0}, + {AMOVV, C_SOREG_12, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, 
REGZERO, 0}, + {AMOVB, C_SOREG_12, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGZERO, 0}, + {AMOVBU, C_SOREG_12, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGZERO, 0}, + {AVMOVQ, C_SOREG_12, C_NONE, C_NONE, C_VREG, C_NONE, 8, 4, REGZERO, 0}, + {AXVMOVQ, C_SOREG_12, C_NONE, C_NONE, C_XREG, C_NONE, 8, 4, REGZERO, 0}, {AVMOVQ, C_SAUTO, C_NONE, C_NONE, C_VREG, C_NONE, 8, 4, REGZERO, 0}, {AXVMOVQ, C_SAUTO, C_NONE, C_NONE, C_XREG, C_NONE, 8, 4, REGZERO, 0}, - {ALL, C_SOREG, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGZERO, 0}, - {ALLV, C_SOREG, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGZERO, 0}, {AMOVW, C_REG, C_NONE, C_NONE, C_LAUTO, C_NONE, 35, 12, REGSP, 0}, {AMOVWU, C_REG, C_NONE, C_NONE, C_LAUTO, C_NONE, 35, 12, REGSP, 0}, {AMOVV, C_REG, C_NONE, C_NONE, C_LAUTO, C_NONE, 35, 12, REGSP, 0}, {AMOVB, C_REG, C_NONE, C_NONE, C_LAUTO, C_NONE, 35, 12, REGSP, 0}, {AMOVBU, C_REG, C_NONE, C_NONE, C_LAUTO, C_NONE, 35, 12, REGSP, 0}, - {AMOVW, C_REG, C_NONE, C_NONE, C_LOREG, C_NONE, 35, 12, REGZERO, 0}, - {AMOVWU, C_REG, C_NONE, C_NONE, C_LOREG, C_NONE, 35, 12, REGZERO, 0}, - {AMOVV, C_REG, C_NONE, C_NONE, C_LOREG, C_NONE, 35, 12, REGZERO, 0}, - {AMOVB, C_REG, C_NONE, C_NONE, C_LOREG, C_NONE, 35, 12, REGZERO, 0}, - {AMOVBU, C_REG, C_NONE, C_NONE, C_LOREG, C_NONE, 35, 12, REGZERO, 0}, - {ASC, C_REG, C_NONE, C_NONE, C_LOREG, C_NONE, 35, 12, REGZERO, 0}, + {AMOVW, C_REG, C_NONE, C_NONE, C_LOREG_32, C_NONE, 35, 12, REGZERO, 0}, + {AMOVWU, C_REG, C_NONE, C_NONE, C_LOREG_32, C_NONE, 35, 12, REGZERO, 0}, + {AMOVV, C_REG, C_NONE, C_NONE, C_LOREG_32, C_NONE, 35, 12, REGZERO, 0}, + {AMOVB, C_REG, C_NONE, C_NONE, C_LOREG_32, C_NONE, 35, 12, REGZERO, 0}, + {AMOVBU, C_REG, C_NONE, C_NONE, C_LOREG_32, C_NONE, 35, 12, REGZERO, 0}, {AMOVW, C_REG, C_NONE, C_NONE, C_ADDR, C_NONE, 50, 8, 0, 0}, {AMOVWU, C_REG, C_NONE, C_NONE, C_ADDR, C_NONE, 50, 8, 0, 0}, {AMOVV, C_REG, C_NONE, C_NONE, C_ADDR, C_NONE, 50, 8, 0, 0}, @@ -212,19 +211,20 @@ var optab = []Optab{ {AMOVV, C_REG, C_NONE, C_NONE, C_TLS_LE, C_NONE, 53, 16, 0, 0}, {AMOVB, C_REG, C_NONE, C_NONE, C_TLS_LE, C_NONE, 53, 16, 0, 0}, {AMOVBU, C_REG, C_NONE, C_NONE, C_TLS_LE, C_NONE, 53, 16, 0, 0}, - {AMOVWP, C_REG, C_NONE, C_NONE, C_SOREG, C_NONE, 73, 4, 0, 0}, - {AMOVWP, C_REG, C_NONE, C_NONE, C_LOREG, C_NONE, 73, 4, 0, 0}, + {AMOVWP, C_REG, C_NONE, C_NONE, C_SOREG_16, C_NONE, 73, 4, 0, 0}, + {AMOVWP, C_REG, C_NONE, C_NONE, C_LOREG_32, C_NONE, 73, 12, 0, 0}, + {AMOVWP, C_REG, C_NONE, C_NONE, C_LOREG_64, C_NONE, 73, 24, 0, 0}, {AMOVW, C_LAUTO, C_NONE, C_NONE, C_REG, C_NONE, 36, 12, REGSP, 0}, {AMOVWU, C_LAUTO, C_NONE, C_NONE, C_REG, C_NONE, 36, 12, REGSP, 0}, {AMOVV, C_LAUTO, C_NONE, C_NONE, C_REG, C_NONE, 36, 12, REGSP, 0}, {AMOVB, C_LAUTO, C_NONE, C_NONE, C_REG, C_NONE, 36, 12, REGSP, 0}, {AMOVBU, C_LAUTO, C_NONE, C_NONE, C_REG, C_NONE, 36, 12, REGSP, 0}, - {AMOVW, C_LOREG, C_NONE, C_NONE, C_REG, C_NONE, 36, 12, REGZERO, 0}, - {AMOVWU, C_LOREG, C_NONE, C_NONE, C_REG, C_NONE, 36, 12, REGZERO, 0}, - {AMOVV, C_LOREG, C_NONE, C_NONE, C_REG, C_NONE, 36, 12, REGZERO, 0}, - {AMOVB, C_LOREG, C_NONE, C_NONE, C_REG, C_NONE, 36, 12, REGZERO, 0}, - {AMOVBU, C_LOREG, C_NONE, C_NONE, C_REG, C_NONE, 36, 12, REGZERO, 0}, + {AMOVW, C_LOREG_32, C_NONE, C_NONE, C_REG, C_NONE, 36, 12, REGZERO, 0}, + {AMOVWU, C_LOREG_32, C_NONE, C_NONE, C_REG, C_NONE, 36, 12, REGZERO, 0}, + {AMOVV, C_LOREG_32, C_NONE, C_NONE, C_REG, C_NONE, 36, 12, REGZERO, 0}, + {AMOVB, C_LOREG_32, C_NONE, C_NONE, C_REG, C_NONE, 36, 12, REGZERO, 0}, + {AMOVBU, C_LOREG_32, C_NONE, C_NONE, C_REG, C_NONE, 36, 12, REGZERO, 0}, {AMOVW, C_ADDR, 
C_NONE, C_NONE, C_REG, C_NONE, 51, 8, 0, 0}, {AMOVWU, C_ADDR, C_NONE, C_NONE, C_REG, C_NONE, 51, 8, 0, 0}, {AMOVV, C_ADDR, C_NONE, C_NONE, C_REG, C_NONE, 51, 8, 0, 0}, @@ -235,8 +235,9 @@ var optab = []Optab{ {AMOVV, C_TLS_LE, C_NONE, C_NONE, C_REG, C_NONE, 54, 16, 0, 0}, {AMOVB, C_TLS_LE, C_NONE, C_NONE, C_REG, C_NONE, 54, 16, 0, 0}, {AMOVBU, C_TLS_LE, C_NONE, C_NONE, C_REG, C_NONE, 54, 16, 0, 0}, - {AMOVWP, C_SOREG, C_NONE, C_NONE, C_REG, C_NONE, 74, 4, 0, 0}, - {AMOVWP, C_LOREG, C_NONE, C_NONE, C_REG, C_NONE, 74, 4, 0, 0}, + {AMOVWP, C_SOREG_16, C_NONE, C_NONE, C_REG, C_NONE, 74, 4, 0, 0}, + {AMOVWP, C_LOREG_32, C_NONE, C_NONE, C_REG, C_NONE, 74, 12, 0, 0}, + {AMOVWP, C_LOREG_64, C_NONE, C_NONE, C_REG, C_NONE, 74, 24, 0, 0}, {AMOVW, C_SACON, C_NONE, C_NONE, C_REG, C_NONE, 3, 4, REGSP, 0}, {AMOVV, C_SACON, C_NONE, C_NONE, C_REG, C_NONE, 3, 4, REGSP, 0}, @@ -333,25 +334,25 @@ var optab = []Optab{ {AMOVF, C_SAUTO, C_NONE, C_NONE, C_FREG, C_NONE, 28, 4, REGSP, 0}, {AMOVD, C_SAUTO, C_NONE, C_NONE, C_FREG, C_NONE, 28, 4, REGSP, 0}, - {AMOVF, C_SOREG, C_NONE, C_NONE, C_FREG, C_NONE, 28, 4, REGZERO, 0}, - {AMOVD, C_SOREG, C_NONE, C_NONE, C_FREG, C_NONE, 28, 4, REGZERO, 0}, + {AMOVF, C_SOREG_12, C_NONE, C_NONE, C_FREG, C_NONE, 28, 4, REGZERO, 0}, + {AMOVD, C_SOREG_12, C_NONE, C_NONE, C_FREG, C_NONE, 28, 4, REGZERO, 0}, {AMOVF, C_LAUTO, C_NONE, C_NONE, C_FREG, C_NONE, 28, 12, REGSP, 0}, {AMOVD, C_LAUTO, C_NONE, C_NONE, C_FREG, C_NONE, 28, 12, REGSP, 0}, - {AMOVF, C_LOREG, C_NONE, C_NONE, C_FREG, C_NONE, 28, 12, REGZERO, 0}, - {AMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, C_NONE, 28, 12, REGZERO, 0}, + {AMOVF, C_LOREG_32, C_NONE, C_NONE, C_FREG, C_NONE, 28, 12, REGZERO, 0}, + {AMOVD, C_LOREG_32, C_NONE, C_NONE, C_FREG, C_NONE, 28, 12, REGZERO, 0}, {AMOVF, C_ADDR, C_NONE, C_NONE, C_FREG, C_NONE, 51, 8, 0, 0}, {AMOVD, C_ADDR, C_NONE, C_NONE, C_FREG, C_NONE, 51, 8, 0, 0}, {AMOVF, C_FREG, C_NONE, C_NONE, C_SAUTO, C_NONE, 29, 4, REGSP, 0}, {AMOVD, C_FREG, C_NONE, C_NONE, C_SAUTO, C_NONE, 29, 4, REGSP, 0}, - {AMOVF, C_FREG, C_NONE, C_NONE, C_SOREG, C_NONE, 29, 4, REGZERO, 0}, - {AMOVD, C_FREG, C_NONE, C_NONE, C_SOREG, C_NONE, 29, 4, REGZERO, 0}, + {AMOVF, C_FREG, C_NONE, C_NONE, C_SOREG_12, C_NONE, 29, 4, REGZERO, 0}, + {AMOVD, C_FREG, C_NONE, C_NONE, C_SOREG_12, C_NONE, 29, 4, REGZERO, 0}, {AMOVF, C_FREG, C_NONE, C_NONE, C_LAUTO, C_NONE, 29, 12, REGSP, 0}, {AMOVD, C_FREG, C_NONE, C_NONE, C_LAUTO, C_NONE, 29, 12, REGSP, 0}, - {AMOVF, C_FREG, C_NONE, C_NONE, C_LOREG, C_NONE, 29, 12, REGZERO, 0}, - {AMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, C_NONE, 29, 12, REGZERO, 0}, + {AMOVF, C_FREG, C_NONE, C_NONE, C_LOREG_32, C_NONE, 29, 12, REGZERO, 0}, + {AMOVD, C_FREG, C_NONE, C_NONE, C_LOREG_32, C_NONE, 29, 12, REGZERO, 0}, {AMOVF, C_FREG, C_NONE, C_NONE, C_ADDR, C_NONE, 50, 8, 0, 0}, {AMOVD, C_FREG, C_NONE, C_NONE, C_ADDR, C_NONE, 50, 8, 0, 0}, @@ -426,11 +427,11 @@ var optab = []Optab{ {AVMOVQ, C_ELEM, C_NONE, C_NONE, C_ARNG, C_NONE, 45, 4, 0, 0}, - {AVMOVQ, C_SOREG, C_NONE, C_NONE, C_ARNG, C_NONE, 46, 4, 0, 0}, - {AXVMOVQ, C_SOREG, C_NONE, C_NONE, C_ARNG, C_NONE, 46, 4, 0, 0}, + {AVMOVQ, C_SOREG_12, C_NONE, C_NONE, C_ARNG, C_NONE, 46, 4, 0, 0}, + {AXVMOVQ, C_SOREG_12, C_NONE, C_NONE, C_ARNG, C_NONE, 46, 4, 0, 0}, - {APRELD, C_SOREG, C_U5CON, C_NONE, C_NONE, C_NONE, 47, 4, 0, 0}, - {APRELDX, C_SOREG, C_DCON, C_U5CON, C_NONE, C_NONE, 48, 20, 0, 0}, + {APRELD, C_SOREG_12, C_U5CON, C_NONE, C_NONE, C_NONE, 47, 4, 0, 0}, + {APRELDX, C_SOREG_16, C_DCON, C_U5CON, C_NONE, C_NONE, 48, 20, 0, 0}, {AALSLV, C_U3CON, C_REG, C_REG, 
C_REG, C_NONE, 64, 4, 0, 0}, @@ -678,7 +679,7 @@ func span0(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { bp := c.cursym.P var i int32 - var out [5]uint32 + var out [6]uint32 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link { c.pc = p.Pc o = c.oplook(p) @@ -778,7 +779,7 @@ func (c *ctxt0) aclass(a *obj.Addr) int { a.Reg = obj.REG_NONE } c.instoffset = int64(c.autosize) + a.Offset - if c.instoffset >= -BIG && c.instoffset < BIG { + if c.instoffset >= -BIG_12 && c.instoffset < BIG_12 { return C_SAUTO } return C_LAUTO @@ -790,7 +791,7 @@ func (c *ctxt0) aclass(a *obj.Addr) int { a.Reg = obj.REG_NONE } c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize - if c.instoffset >= -BIG && c.instoffset < BIG { + if c.instoffset >= -BIG_12 && c.instoffset < BIG_12 { return C_SAUTO } return C_LAUTO @@ -808,10 +809,23 @@ func (c *ctxt0) aclass(a *obj.Addr) int { if c.instoffset == 0 { return C_ZOREG } - if c.instoffset >= -BIG && c.instoffset < BIG { - return C_SOREG + if c.instoffset >= -BIG_8 && c.instoffset < BIG_8 { + return C_SOREG_8 + } else if c.instoffset >= -BIG_9 && c.instoffset < BIG_9 { + return C_SOREG_9 + } else if c.instoffset >= -BIG_10 && c.instoffset < BIG_10 { + return C_SOREG_10 + } else if c.instoffset >= -BIG_11 && c.instoffset < BIG_11 { + return C_SOREG_11 + } else if c.instoffset >= -BIG_12 && c.instoffset < BIG_12 { + return C_SOREG_12 + } else if c.instoffset >= -BIG_16 && c.instoffset < BIG_16 { + return C_SOREG_16 + } else if c.instoffset >= -BIG_32 && c.instoffset < BIG_32 { + return C_LOREG_32 + } else { + return C_LOREG_64 } - return C_LOREG case obj.NAME_GOTREF: return C_GOTADDR @@ -828,7 +842,7 @@ func (c *ctxt0) aclass(a *obj.Addr) int { case obj.NAME_NONE: c.instoffset = a.Offset if a.Reg != 0 { - if -BIG <= c.instoffset && c.instoffset <= BIG { + if -BIG_12 <= c.instoffset && c.instoffset <= BIG_12 { return C_SACON } if isint32(c.instoffset) { @@ -857,7 +871,7 @@ func (c *ctxt0) aclass(a *obj.Addr) int { a.Reg = obj.REG_NONE } c.instoffset = int64(c.autosize) + a.Offset - if c.instoffset >= -BIG && c.instoffset < BIG { + if c.instoffset >= -BIG_12 && c.instoffset < BIG_12 { return C_SACON } return C_LACON @@ -869,7 +883,7 @@ func (c *ctxt0) aclass(a *obj.Addr) int { a.Reg = obj.REG_NONE } c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize - if c.instoffset >= -BIG && c.instoffset < BIG { + if c.instoffset >= -BIG_12 && c.instoffset < BIG_12 { return C_SACON } return C_LACON @@ -1271,10 +1285,33 @@ func cmp(a int, b int) bool { case C_REG: return b == C_ZCON - case C_LOREG: - return b == C_ZOREG || b == C_SOREG + case C_LOREG_64: + if b == C_ZOREG || b == C_SOREG_8 || + b == C_SOREG_9 || b == C_SOREG_10 || + b == C_SOREG_11 || b == C_SOREG_12 || + b == C_SOREG_16 || b == C_LOREG_32 { + return true + } - case C_SOREG: + case C_LOREG_32: + return cmp(C_SOREG_16, b) + + case C_SOREG_16: + return cmp(C_SOREG_12, b) + + case C_SOREG_12: + return cmp(C_SOREG_11, b) + + case C_SOREG_11: + return cmp(C_SOREG_10, b) + + case C_SOREG_10: + return cmp(C_SOREG_9, b) + + case C_SOREG_9: + return cmp(C_SOREG_8, b) + + case C_SOREG_8: return b == C_ZOREG } @@ -1303,7 +1340,7 @@ func opset(a, b0 obj.As) { func buildop(ctxt *obj.Link) { if ctxt.DiagFunc == nil { - ctxt.DiagFunc = func(format string, args ...interface{}) { + ctxt.DiagFunc = func(format string, args ...any) { log.Printf(format, args...) 
} } @@ -1453,6 +1490,10 @@ func buildop(ctxt *obj.Link) { case AMOVWP: opset(AMOVVP, r0) + opset(ASC, r0) + opset(ASCV, r0) + opset(ALL, r0) + opset(ALLV, r0) case AMUL: opset(AMULU, r0) @@ -1522,10 +1563,8 @@ func buildop(ctxt *obj.Link) { AMOVWU, AVMOVQ, AXVMOVQ, - ALL, - ALLV, - ASC, - ASCV, + AVSHUFB, + AXVSHUFB, ANEGW, ANEGV, AWORD, @@ -1663,6 +1702,9 @@ func buildop(ctxt *obj.Link) { opset(AVMULD, r0) opset(AVDIVF, r0) opset(AVDIVD, r0) + opset(AVSHUFH, r0) + opset(AVSHUFW, r0) + opset(AVSHUFV, r0) case AXVSEQB: opset(AXVSEQH, r0) @@ -1736,6 +1778,9 @@ func buildop(ctxt *obj.Link) { opset(AXVMULD, r0) opset(AXVDIVF, r0) opset(AXVDIVD, r0) + opset(AXVSHUFH, r0) + opset(AXVSHUFW, r0) + opset(AXVSHUFV, r0) case AVANDB: opset(AVORB, r0) @@ -1745,6 +1790,11 @@ func buildop(ctxt *obj.Link) { opset(AVSHUF4IH, r0) opset(AVSHUF4IW, r0) opset(AVSHUF4IV, r0) + opset(AVPERMIW, r0) + opset(AVEXTRINSB, r0) + opset(AVEXTRINSH, r0) + opset(AVEXTRINSW, r0) + opset(AVEXTRINSV, r0) case AXVANDB: opset(AXVORB, r0) @@ -1754,6 +1804,13 @@ func buildop(ctxt *obj.Link) { opset(AXVSHUF4IH, r0) opset(AXVSHUF4IW, r0) opset(AXVSHUF4IV, r0) + opset(AXVPERMIW, r0) + opset(AXVPERMIV, r0) + opset(AXVPERMIQ, r0) + opset(AXVEXTRINSB, r0) + opset(AXVEXTRINSH, r0) + opset(AXVEXTRINSW, r0) + opset(AXVEXTRINSV, r0) case AVANDV: opset(AVORV, r0) @@ -1833,6 +1890,22 @@ func buildop(ctxt *obj.Link) { opset(AVSUBW, r0) opset(AVSUBV, r0) opset(AVSUBQ, r0) + opset(AVSADDB, r0) + opset(AVSADDH, r0) + opset(AVSADDW, r0) + opset(AVSADDV, r0) + opset(AVSSUBB, r0) + opset(AVSSUBH, r0) + opset(AVSSUBW, r0) + opset(AVSSUBV, r0) + opset(AVSADDBU, r0) + opset(AVSADDHU, r0) + opset(AVSADDWU, r0) + opset(AVSADDVU, r0) + opset(AVSSUBBU, r0) + opset(AVSSUBHU, r0) + opset(AVSSUBWU, r0) + opset(AVSSUBVU, r0) case AXVADDB: opset(AXVADDH, r0) @@ -1844,6 +1917,22 @@ func buildop(ctxt *obj.Link) { opset(AXVSUBW, r0) opset(AXVSUBV, r0) opset(AXVSUBQ, r0) + opset(AXVSADDB, r0) + opset(AXVSADDH, r0) + opset(AXVSADDW, r0) + opset(AXVSADDV, r0) + opset(AXVSSUBB, r0) + opset(AXVSSUBH, r0) + opset(AXVSSUBW, r0) + opset(AXVSSUBV, r0) + opset(AXVSADDBU, r0) + opset(AXVSADDHU, r0) + opset(AXVSADDWU, r0) + opset(AXVSADDVU, r0) + opset(AXVSSUBBU, r0) + opset(AXVSSUBHU, r0) + opset(AXVSSUBWU, r0) + opset(AXVSSUBVU, r0) case AVSLLB: opset(AVSRLB, r0) @@ -2051,24 +2140,32 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { o3 := uint32(0) o4 := uint32(0) o5 := uint32(0) + o6 := uint32(0) add := AADDU add = AADDVU switch o.type_ { default: - c.ctxt.Diag("unknown type %d %v", o.type_) + c.ctxt.Diag("unknown type %d", o.type_) prasm(p) case 0: // pseudo ops break - case 1: // mov r1,r2 ==> OR r1,r0,r2 - a := AOR - if p.As == AMOVW { - a = ASLL + case 1: // mov rj, rd + switch p.As { + case AMOVW: + o1 = OP_RRR(c.oprrr(ASLL), uint32(REGZERO), uint32(p.From.Reg), uint32(p.To.Reg)) + case AMOVV: + o1 = OP_RRR(c.oprrr(AOR), uint32(REGZERO), uint32(p.From.Reg), uint32(p.To.Reg)) + case AVMOVQ: + o1 = OP_6IRR(c.opirr(AVSLLV), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg)) + case AXVMOVQ: + o1 = OP_6IRR(c.opirr(AXVSLLV), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg)) + default: + c.ctxt.Diag("unexpected encoding\n%v", p) } - o1 = OP_RRR(c.oprrr(a), uint32(REGZERO), uint32(p.From.Reg), uint32(p.To.Reg)) case 2: // add/sub r1,[r2],r3 r := int(p.Reg) @@ -2955,18 +3052,51 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { o4 = OP_RRR(c.oprrr(p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg)) case 73: - v := c.regoff(&p.To) + v := 
c.regoff(&p.To) + v := c.vregoff(&p.To) + r := p.To.Reg if v&3 != 0 { c.ctxt.Diag("%v: offset must be a multiple of 4.\n", p) } - o1 = OP_14IRR(c.opirr(p.As), uint32(v>>2), uint32(p.To.Reg), uint32(p.From.Reg)) + + switch o.size { + case 4: // 16 bit + o1 = OP_14IRR(c.opirr(p.As), uint32(v>>2), uint32(r), uint32(p.From.Reg)) + case 12: // 32 bit + o1 = OP_16IRR(c.opirr(AADDV16), uint32(v>>16), uint32(REG_R0), uint32(REGTMP)) + o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP)) + o3 = OP_14IRR(c.opirr(p.As), uint32(v>>2), uint32(REGTMP), uint32(p.From.Reg)) + case 24: // 64 bit + o1 = OP_IR(c.opir(ALU12IW), uint32(v>>12), uint32(REGTMP)) + o2 = OP_12IRR(c.opirr(AOR), uint32(v), uint32(REGTMP), uint32(REGTMP)) + o3 = OP_IR(c.opir(ALU32ID), uint32(v>>32), uint32(REGTMP)) + o4 = OP_12IRR(c.opirr(ALU52ID), uint32(v>>52), uint32(REGTMP), uint32(REGTMP)) + o5 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP)) + o6 = OP_14IRR(c.opirr(p.As), uint32(0), uint32(REGTMP), uint32(p.From.Reg)) + } case 74: - v := c.regoff(&p.From) + v := c.vregoff(&p.From) + r := p.From.Reg if v&3 != 0 { c.ctxt.Diag("%v: offset must be a multiple of 4.\n", p) } - o1 = OP_14IRR(c.opirr(-p.As), uint32(v>>2), uint32(p.From.Reg), uint32(p.To.Reg)) + + switch o.size { + case 4: // 16 bit + o1 = OP_14IRR(c.opirr(-p.As), uint32(v>>2), uint32(r), uint32(p.To.Reg)) + case 12: // 32 bit + o1 = OP_16IRR(c.opirr(AADDV16), uint32(v>>16), uint32(REG_R0), uint32(REGTMP)) + o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP)) + o3 = OP_14IRR(c.opirr(-p.As), uint32(v>>2), uint32(REGTMP), uint32(p.To.Reg)) + case 24: // 64 bit + o1 = OP_IR(c.opir(ALU12IW), uint32(v>>12), uint32(REGTMP)) + o2 = OP_12IRR(c.opirr(AOR), uint32(v), uint32(REGTMP), uint32(REGTMP)) + o3 = OP_IR(c.opir(ALU32ID), uint32(v>>32), uint32(REGTMP)) + o4 = OP_12IRR(c.opirr(ALU52ID), uint32(v>>52), uint32(REGTMP), uint32(REGTMP)) + o5 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP)) + o6 = OP_14IRR(c.opirr(-p.As), uint32(0), uint32(REGTMP), uint32(p.To.Reg)) + } + } out[0] = o1 @@ -2974,6 +3104,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { out[2] = o3 out[3] = o4 out[4] = o5 + out[5] = o6 } // checkoperand checks if operand >= 0 && operand <= maxoperand @@ -3018,6 +3149,10 @@ func (c *ctxt0) oprrrr(a obj.As) uint32 { return 0x8d << 20 // fnmsub.s case AFNMSUBD: return 0x8e << 20 // fnmsub.d + case AVSHUFB: + return 0x0D5 << 20 // vshuf.b + case AXVSHUFB: + return 0x0D6 << 20 // xvshuf.b } c.ctxt.Diag("bad rrrr opcode %v", a) @@ -3526,6 +3661,70 @@ func (c *ctxt0) oprrr(a obj.As) uint32 { return 0xe81b << 15 // xvsub.d case AXVSUBQ: return 0xea5b << 15 // xvsub.q + case AVSADDB: + return 0x0E08C << 15 // vsadd.b + case AVSADDH: + return 0x0E08D << 15 // vsadd.h + case AVSADDW: + return 0x0E08E << 15 // vsadd.w + case AVSADDV: + return 0x0E08F << 15 // vsadd.d + case AVSSUBB: + return 0x0E090 << 15 // vssub.b + case AVSSUBH: + return 0x0E091 << 15 // vssub.h + case AVSSUBW: + return 0x0E092 << 15 // vssub.w + case AVSSUBV: + return 0x0E093 << 15 // vssub.d + case AVSADDBU: + return 0x0E094 << 15 // vsadd.bu + case AVSADDHU: + return 0x0E095 << 15 // vsadd.hu + case AVSADDWU: + return 0x0E096 << 15 // vsadd.wu + case AVSADDVU: + return 0x0E097 << 15 // vsadd.du + case AVSSUBBU: + return 0x0E098 << 15 // vssub.bu + case AVSSUBHU: + return 0x0E099 << 15 // vssub.hu + case AVSSUBWU: + return 0x0E09A << 15 // vssub.wu + case AVSSUBVU: + return 0x0E09B << 15 // vssub.du + case AXVSADDB: + return 0x0E88C << 15 // xvsadd.b + case 
AXVSADDH: + return 0x0E88D << 15 // xvsadd.h + case AXVSADDW: + return 0x0E88E << 15 // xvsadd.w + case AXVSADDV: + return 0x0E88F << 15 // xvsadd.d + case AXVSSUBB: + return 0x0E890 << 15 // xvssub.b + case AXVSSUBH: + return 0x0E891 << 15 // xvssub.h + case AXVSSUBW: + return 0x0E892 << 15 // xvssub.w + case AXVSSUBV: + return 0x0E893 << 15 // xvssub.d + case AXVSADDBU: + return 0x0E894 << 15 // xvsadd.bu + case AXVSADDHU: + return 0x0E895 << 15 // xvsadd.hu + case AXVSADDWU: + return 0x0E896 << 15 // xvsadd.wu + case AXVSADDVU: + return 0x0E897 << 15 // xvsadd.du + case AXVSSUBBU: + return 0x0E898 << 15 // xvssub.bu + case AXVSSUBHU: + return 0x0E899 << 15 // xvssub.hu + case AXVSSUBWU: + return 0x0E89A << 15 // xvssub.wu + case AXVSSUBVU: + return 0x0E89B << 15 // xvssub.du case AVILVLB: return 0xe234 << 15 // vilvl.b case AVILVLH: @@ -3686,6 +3885,18 @@ func (c *ctxt0) oprrr(a obj.As) uint32 { return 0xea22 << 15 // xvbitrev.w case AXVBITREVV: return 0xea23 << 15 // xvbitrev.d + case AVSHUFH: + return 0x0E2F5 << 15 // vshuf.h + case AVSHUFW: + return 0x0E2F6 << 15 // vshuf.w + case AVSHUFV: + return 0x0E2F7 << 15 // vshuf.d + case AXVSHUFH: + return 0x0EAF5 << 15 // xvshuf.h + case AXVSHUFW: + return 0x0EAF6 << 15 // xvshuf.w + case AXVSHUFV: + return 0x0EAF7 << 15 // xvshuf.d } if a < 0 { @@ -4143,13 +4354,13 @@ func (c *ctxt0) opirr(a obj.As) uint32 { case AROTRV: return 0x004d << 16 case -ALL: - return 0x020 << 24 + return 0x020 << 24 // ll.w case -ALLV: - return 0x022 << 24 + return 0x022 << 24 // ll.d case ASC: - return 0x021 << 24 + return 0x021 << 24 // sc.w case ASCV: - return 0x023 << 24 + return 0x023 << 24 // sc.d case AVANDB: return 0x1CF4 << 18 // vandi.b case AVORB: @@ -4294,6 +4505,30 @@ func (c *ctxt0) opirr(a obj.As) uint32 { return 0x1de6 << 18 // xvshuf4i.w case AXVSHUF4IV: return 0x1de7 << 18 // xvshuf4i.d + case AVPERMIW: + return 0x1cf9 << 18 // vpermi.w + case AXVPERMIW: + return 0x1df9 << 18 // xvpermi.w + case AXVPERMIV: + return 0x1dfa << 18 // xvpermi.d + case AXVPERMIQ: + return 0x1dfb << 18 // xvpermi.q + case AVEXTRINSB: + return 0x1ce3 << 18 // vextrins.b + case AVEXTRINSH: + return 0x1ce2 << 18 // vextrins.h + case AVEXTRINSW: + return 0x1ce1 << 18 // vextrins.w + case AVEXTRINSV: + return 0x1ce0 << 18 // vextrins.d + case AXVEXTRINSB: + return 0x1de3 << 18 // xvextrins.b + case AXVEXTRINSH: + return 0x1de2 << 18 // xvextrins.h + case AXVEXTRINSW: + return 0x1de1 << 18 // xvextrins.w + case AXVEXTRINSV: + return 0x1de0 << 18 // xvextrins.d case AVBITCLRB: return 0x1CC4<<18 | 0x1<<13 // vbitclri.b case AVBITCLRH: @@ -4438,7 +4673,7 @@ func (c *ctxt0) specialFpMovInst(a obj.As, fclass int, tclass int) uint32 { } } - c.ctxt.Diag("bad class combination: %s %s,%s\n", a, fclass, tclass) + c.ctxt.Diag("bad class combination: %s %d,%d\n", a, fclass, tclass) return 0 } diff --git a/src/cmd/internal/obj/loong64/doc.go b/src/cmd/internal/obj/loong64/doc.go index f7e5a4fb427..19c9e05590c 100644 --- a/src/cmd/internal/obj/loong64/doc.go +++ b/src/cmd/internal/obj/loong64/doc.go @@ -203,6 +203,15 @@ Note: In the following sections 3.1 to 3.6, "ui4" (4-bit unsigned int immediate) VMOVQ Vj.W[index], Vd.W4 | vreplvei.w vd, vj, ui2 | for i in range(4) : VR[vd].w[i] = VR[vj].w[ui2] VMOVQ Vj.V[index], Vd.V2 | vreplvei.d vd, vj, ui1 | for i in range(2) : VR[vd].d[i] = VR[vj].d[ui1] +3.7 Move vector register to vector register. 
+ Instruction format: + VMOVQ Vj, Vd + + Mapping between Go and platform assembly: + Go assembly | platform assembly | semantics + VMOVQ Vj, Vd | vslli.d vd, vj, 0x0 | for i in range(2) : VR[vd].D[i] = SLL(VR[vj].D[i], 0) + XVMOVQ Xj, Xd | xvslli.d xd, xj, 0x0 | for i in range(4) : XR[xd].D[i] = SLL(XR[xj].D[i], 0) + 3.7 Load data from memory and broadcast to each element of a vector register. Instruction format: @@ -229,6 +238,40 @@ Note: In the following sections 3.1 to 3.6, "ui4" (4-bit unsigned int immediate) VMOVQ 8(R4), V5.W4 | vldrepl.w v5, r4, $2 VMOVQ 8(R4), V5.V2 | vldrepl.d v5, r4, $1 +3.8 Vector permutation instruction + Instruction format: + VPERMIW ui8, Vj, Vd + + Mapping between Go and platform assembly: + Go assembly | platform assembly | semantics + VPERMIW ui8, Vj, Vd | vpermi.w vd, vj, ui8 | VR[vd].W[0] = VR[vj].W[ui8[1:0]], VR[vd].W[1] = VR[vj].W[ui8[3:2]], + | | VR[vd].W[2] = VR[vd].W[ui8[5:4]], VR[vd].W[3] = VR[vd].W[ui8[7:6]] + XVPERMIW ui8, Xj, Xd | xvpermi.w xd, xj, ui8 | XR[xd].W[0] = XR[xj].W[ui8[1:0]], XR[xd].W[1] = XR[xj].W[ui8[3:2]], + | | XR[xd].W[2] = XR[xd].W[ui8[5:4]], XR[xd].W[3] = XR[xd].W[ui8[7:6]], + | | XR[xd].W[4] = XR[xj].W[ui8[1:0]+4], XR[xd].W[5] = XR[xj].W[ui8[3:2]+4], + | | XR[xd].W[6] = XR[xd].W[ui8[5:4]+4], XR[xd].W[7] = XR[xd].W[ui8[7:6]+4] + XVPERMIV ui8, Xj, Xd | xvpermi.d xd, xj, ui8 | XR[xd].D[0] = XR[xj].D[ui8[1:0]], XR[xd].D[1] = XR[xj].D[ui8[3:2]], + | | XR[xd].D[2] = XR[xj].D[ui8[5:4]], XR[xd].D[3] = XR[xj].D[ui8[7:6]] + XVPERMIQ ui8, Xj, Xd | xvpermi.q xd, xj, ui8 | vec = {XR[xd], XR[xj]}, XR[xd].Q[0] = vec.Q[ui8[1:0]], XR[xd].Q[1] = vec.Q[ui8[5:4]] + +3.9 Vector misc instructions + +3.9.1 {,X}VEXTRINS.{B,H,W,V} + + Instruction format: + VEXTRINSB ui8, Vj, Vd + + Mapping between Go and platform assembly: + Go assembly | platform assembly | semantics + VEXTRINSB ui8, Vj, Vd | vextrins.b vd, vj, ui8 | VR[vd].B[ui8[7:4]] = VR[vj].B[ui8[3:0]] + VEXTRINSH ui8, Vj, Vd | vextrins.h vd, vj, ui8 | VR[vd].H[ui8[6:4]] = VR[vj].H[ui8[2:0]] + VEXTRINSW ui8, Vj, Vd | vextrins.w vd, vj, ui8 | VR[vd].W[ui8[5:4]] = VR[vj].W[ui8[1:0]] + VEXTRINSV ui8, Vj, Vd | vextrins.d vd, vj, ui8 | VR[vd].D[ui8[4]] = VR[vj].D[ui8[0]] + XVEXTRINSB ui8, Xj, Xd | xvextrins.b xd, xj, ui8 | XR[xd].B[ui8[7:4]] = XR[xj].B[ui8[3:0]], XR[xd].B[ui8[7:4]+16] = XR[xj].B[ui8[3:0]+16] + XVEXTRINSH ui8, Xj, Xd | xvextrins.h xd, xj, ui8 | XR[xd].H[ui8[6:4]] = XR[xj].H[ui8[2:0]], XR[xd].H[ui8[6:4]+8] = XR[xj].H[ui8[2:0]+8] + XVEXTRINSW ui8, Xj, Xd | xvextrins.w xd, xj, ui8 | XR[xd].W[ui8[5:4]] = XR[xj].W[ui8[1:0]], XR[xd].W[ui8[5:4]+4] = XR[xj].W[ui8[1:0]+4] + XVEXTRINSV ui8, Xj, Xd | xvextrins.d xd, xj, ui8 | XR[xd].D[ui8[4]] = XR[xj].D[ui8[0]], XR[xd].D[ui8[4]+2] = XR[xj].D[ui8[0]+2] + # Special instruction encoding definition and description on LoongArch 1. 
DBAR hint encoding for LA664(Loongson 3A6000) and later micro-architectures, paraphrased diff --git a/src/cmd/internal/obj/mips/a.out.go b/src/cmd/internal/obj/mips/a.out.go index 5439f0e4aaf..b7e82c50f40 100644 --- a/src/cmd/internal/obj/mips/a.out.go +++ b/src/cmd/internal/obj/mips/a.out.go @@ -245,7 +245,7 @@ var MIPSDWARFRegisters = map[int16]int16{} func init() { // f assigns dwarfregisters[from:to] = (base):(to-from+base) f := func(from, to, base int16) { - for r := int16(from); r <= to; r++ { + for r := from; r <= to; r++ { MIPSDWARFRegisters[r] = (r - from) + base } } diff --git a/src/cmd/internal/obj/mips/asm0.go b/src/cmd/internal/obj/mips/asm0.go index 2de5a4d6c0b..a55953e7414 100644 --- a/src/cmd/internal/obj/mips/asm0.go +++ b/src/cmd/internal/obj/mips/asm0.go @@ -1172,7 +1172,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { } switch o.type_ { default: - c.ctxt.Diag("unknown type %d %v", o.type_) + c.ctxt.Diag("unknown type %d", o.type_) prasm(p) case 0: /* pseudo ops */ diff --git a/src/cmd/internal/obj/objfile.go b/src/cmd/internal/obj/objfile.go index 3299fbf4e6e..4401f1bb74e 100644 --- a/src/cmd/internal/obj/objfile.go +++ b/src/cmd/internal/obj/objfile.go @@ -166,7 +166,7 @@ func WriteObjFile(ctxt *Link, b *bio.Writer) { w.Uint32(uint32(dataOff)) dataOff += int64(len(s.P)) if file := s.File(); file != nil { - dataOff += int64(file.Size) + dataOff += file.Size } } } diff --git a/src/cmd/internal/obj/pcln.go b/src/cmd/internal/obj/pcln.go index 67a078091c5..1cfcde7aa5b 100644 --- a/src/cmd/internal/obj/pcln.go +++ b/src/cmd/internal/obj/pcln.go @@ -22,7 +22,7 @@ import ( // // where func is the function, val is the current value, p is the instruction being // considered, and arg can be used to further parameterize valfunc. -func funcpctab(ctxt *Link, func_ *LSym, desc string, valfunc func(*Link, *LSym, int32, *Prog, int32, interface{}) int32, arg interface{}) *LSym { +func funcpctab(ctxt *Link, func_ *LSym, desc string, valfunc func(*Link, *LSym, int32, *Prog, int32, any) int32, arg any) *LSym { dbg := desc == ctxt.Debugpcln dst := []byte{} sym := &LSym{ @@ -138,7 +138,7 @@ func funcpctab(ctxt *Link, func_ *LSym, desc string, valfunc func(*Link, *LSym, // or the line number (arg == 1) to use at p. // Because p.Pos applies to p, phase == 0 (before p) // takes care of the update. -func pctofileline(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg interface{}) int32 { +func pctofileline(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg any) int32 { if p.As == ATEXT || p.As == ANOP || p.Pos.Line() == 0 || phase == 1 { return oldval } @@ -198,7 +198,7 @@ func (s *pcinlineState) setParentPC(ctxt *Link, globalIndex int, pc int32) { // pctoinline computes the index into the local inlining tree to use at p. // If p is not the result of inlining, pctoinline returns -1. Because p.Pos // applies to p, phase == 0 (before p) takes care of the update. -func (s *pcinlineState) pctoinline(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg interface{}) int32 { +func (s *pcinlineState) pctoinline(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg any) int32 { if phase == 1 { return oldval } @@ -224,7 +224,7 @@ func (s *pcinlineState) pctoinline(ctxt *Link, sym *LSym, oldval int32, p *Prog, // It is oldval plus any adjustment made by p itself. // The adjustment by p takes effect only after p, so we // apply the change during phase == 1. 
-func pctospadj(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg interface{}) int32 { +func pctospadj(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg any) int32 { if oldval == -1 { // starting oldval = 0 } @@ -245,7 +245,7 @@ func pctospadj(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg in // non-PCDATA instructions. // Since PCDATA instructions have no width in the final code, // it does not matter which phase we use for the update. -func pctopcdata(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg interface{}) int32 { +func pctopcdata(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg any) int32 { if phase == 0 || p.As != APCDATA || p.From.Offset != int64(arg.(uint32)) { return oldval } @@ -337,7 +337,7 @@ func linkpcln(ctxt *Link, cursym *LSym) { Attribute: AttrContentAddressable | AttrPcdata, } } else { - pcln.Pcdata[i] = funcpctab(ctxt, cursym, "pctopcdata", pctopcdata, interface{}(uint32(i))) + pcln.Pcdata[i] = funcpctab(ctxt, cursym, "pctopcdata", pctopcdata, any(uint32(i))) } } diff --git a/src/cmd/internal/obj/plist.go b/src/cmd/internal/obj/plist.go index 698e5ace9cc..69914b1c1f3 100644 --- a/src/cmd/internal/obj/plist.go +++ b/src/cmd/internal/obj/plist.go @@ -63,12 +63,12 @@ func Flushplist(ctxt *Link, plist *Plist, newprog ProgAlloc) { switch p.To.Sym.Name { case "go_args_stackmap": if p.From.Type != TYPE_CONST || p.From.Offset != abi.FUNCDATA_ArgsPointerMaps { - ctxt.Diag("%s: FUNCDATA use of go_args_stackmap(SB) without FUNCDATA_ArgsPointerMaps", p.Pos) + ctxt.Diag("%v: FUNCDATA use of go_args_stackmap(SB) without FUNCDATA_ArgsPointerMaps", p) } p.To.Sym = ctxt.LookupDerived(curtext, curtext.Name+".args_stackmap") case "no_pointers_stackmap": if p.From.Type != TYPE_CONST || p.From.Offset != abi.FUNCDATA_LocalsPointerMaps { - ctxt.Diag("%s: FUNCDATA use of no_pointers_stackmap(SB) without FUNCDATA_LocalsPointerMaps", p.Pos) + ctxt.Diag("%v: FUNCDATA use of no_pointers_stackmap(SB) without FUNCDATA_LocalsPointerMaps", p) } // funcdata for functions with no local variables in frame. // Define two zero-length bitmaps, because the same index is used diff --git a/src/cmd/internal/obj/ppc64/a.out.go b/src/cmd/internal/obj/ppc64/a.out.go index aa7bcd30681..d103ebcfc35 100644 --- a/src/cmd/internal/obj/ppc64/a.out.go +++ b/src/cmd/internal/obj/ppc64/a.out.go @@ -311,7 +311,7 @@ var PPC64DWARFRegisters = map[int16]int16{} func init() { // f assigns dwarfregister[from:to] = (base):(to-from+base) f := func(from, to, base int16) { - for r := int16(from); r <= to; r++ { + for r := from; r <= to; r++ { PPC64DWARFRegisters[r] = r - from + base } } diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go index dcd3aa59a46..a39c206c22d 100644 --- a/src/cmd/internal/obj/ppc64/asm9.go +++ b/src/cmd/internal/obj/ppc64/asm9.go @@ -2655,7 +2655,7 @@ func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) { case 9: /* RLDC Ra, $sh, $mb, Rb */ sh := uint32(p.RestArgs[0].Addr.Offset) & 0x3F mb := uint32(p.RestArgs[1].Addr.Offset) & 0x3F - o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), (uint32(sh) & 0x1F)) + o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), (sh & 0x1F)) o1 |= (sh & 0x20) >> 4 // sh[5] is placed in bit 1. o1 |= (mb & 0x1F) << 6 // mb[0:4] is placed in bits 6-10. 
o1 |= (mb & 0x20) // mb[5] is placed in bit 5 @@ -2784,7 +2784,7 @@ func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) { if n > b || b > 63 { c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p) } - o1 = AOP_MD(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n)) + o1 = AOP_MD(OP_RLDIC, uint32(p.To.Reg), r, uint32(n), uint32(b)-uint32(n)) default: c.ctxt.Diag("unexpected op in rldc case\n%v", p) @@ -2967,7 +2967,7 @@ func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) { c.ctxt.Diag("%v is not supported", p) } if o.ispfx { - o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, d) + o1, o2 = pfxadd(p.To.Reg, int16(r), PFX_R_ABS, d) } else if o.size == 8 { o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d))) // tmp = uint16(d) o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) // to = tmp + from @@ -2979,7 +2979,7 @@ } else { // For backwards compatibility with GOPPC64 < 10, generate 34b constants in register. o1 = LOP_IRR(OP_ADDIS, REGZERO, REGTMP, uint32(d>>32)) // tmp = sign_extend((d>>32)&0xFFFF0000) - o2 = loadl16(REGTMP, int64(d>>16)) // tmp |= (d>>16)&0xFFFF + o2 = loadl16(REGTMP, d>>16) // tmp |= (d>>16)&0xFFFF o3 = AOP_MD(OP_RLDICR, REGTMP, REGTMP, 16, 63-16) // tmp <<= 16 o4 = loadl16(REGTMP, int64(uint16(d))) // tmp |= d&0xFFFF o5 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) @@ -3080,9 +3080,9 @@ if o.ispfx { if rel == nil { - o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, v) + o1, o2 = pfxadd(p.To.Reg, int16(r), PFX_R_ABS, v) } else { - o1, o2 = pfxadd(int16(p.To.Reg), REG_R0, PFX_R_PCREL, 0) + o1, o2 = pfxadd(p.To.Reg, REG_R0, PFX_R_PCREL, 0) rel.Type = objabi.R_ADDRPOWER_PCREL34 } } @@ -3519,7 +3519,7 @@ v |= 1 << 8 } - o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12 + o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | v<<12 case 70: /* cmp* r,r,cr or cmp*i r,i,cr or fcmp f,f,cr or cmpeqb r,r */ r := uint32(p.Reg&7) << 2 diff --git a/src/cmd/internal/obj/ppc64/asm_test.go b/src/cmd/internal/obj/ppc64/asm_test.go index ab7af2205e6..9f1acf4b62e 100644 --- a/src/cmd/internal/obj/ppc64/asm_test.go +++ b/src/cmd/internal/obj/ppc64/asm_test.go @@ -439,7 +439,7 @@ func TestAddrClassifier(t *testing.T) { } tsts := [...]struct { arg obj.Addr - output interface{} + output any }{ // Supported register type args {obj.Addr{Type: obj.TYPE_REG, Reg: REG_R1}, C_REG}, diff --git a/src/cmd/internal/obj/ppc64/obj9.go b/src/cmd/internal/obj/ppc64/obj9.go index 5615b70aad4..bae29656397 100644 --- a/src/cmd/internal/obj/ppc64/obj9.go +++ b/src/cmd/internal/obj/ppc64/obj9.go @@ -903,44 +903,38 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q.To.Reg = REGSP q.Spadj = autosize } else { - // Frame size is too large for a MOVDU instruction. - // Store link register before decrementing SP, so if a signal comes - // during the execution of the function prologue, the traceback - // code will not see a half-updated stack frame. - // This sequence is not async preemptible, as if we open a frame - // at the current SP, it will clobber the saved LR. + // Frame size is too large for the immediate form of MOVDU (stdu); use the indexed form (stdux) instead.
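+ // With stdux, the LR save and the SP update are a single instruction, so there is no window where SP has been decremented but the saved LR is not yet in place; the explicit unsafe-point marking that the old sequence needed is no longer required.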
q = obj.Appendp(q, c.newprog) q.As = AMOVD q.Pos = p.Pos q.From.Type = obj.TYPE_REG q.From.Reg = REG_LR q.To.Type = obj.TYPE_REG - q.To.Reg = REG_R29 // REGTMP may be used to synthesize large offset in the next instruction - - q = c.ctxt.StartUnsafePoint(q, c.newprog) + q.To.Reg = REG_R29 + // Create stack adjustment in REGTMP q = obj.Appendp(q, c.newprog) q.As = AMOVD q.Pos = p.Pos - q.From.Type = obj.TYPE_REG - q.From.Reg = REG_R29 - q.To.Type = obj.TYPE_MEM - q.To.Offset = int64(-autosize) - q.To.Reg = REGSP - - prologueEnd = q - - q = obj.Appendp(q, c.newprog) - q.As = AADD - q.Pos = p.Pos q.From.Type = obj.TYPE_CONST q.From.Offset = int64(-autosize) q.To.Type = obj.TYPE_REG - q.To.Reg = REGSP - q.Spadj = +autosize + q.To.Reg = REGTMP - q = c.ctxt.EndUnsafePoint(q, c.newprog, -1) + prologueEnd = q + + // MOVDU R29, R31(R1) + q = obj.Appendp(q, c.newprog) + q.As = AMOVDU + q.Pos = p.Pos + q.From.Type = obj.TYPE_REG + q.From.Reg = REG_R29 + q.To.Type = obj.TYPE_MEM + q.To.Reg = REGTMP + q.To.Index = REGSP + q.Spadj = autosize } + prologueEnd.Pos = prologueEnd.Pos.WithXlogue(src.PosPrologueEnd) } else if c.cursym.Func().Text.Mark&LEAF == 0 { // A very few functions that do not return to their caller diff --git a/src/cmd/internal/obj/riscv/obj.go b/src/cmd/internal/obj/riscv/obj.go index 91642ffbcb0..3deab34d312 100644 --- a/src/cmd/internal/obj/riscv/obj.go +++ b/src/cmd/internal/obj/riscv/obj.go @@ -70,6 +70,10 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { // form of the instruction. if p.From.Type == obj.TYPE_CONST { switch p.As { + case ACSUB: + p.As, p.From.Offset = ACADDI, -p.From.Offset + case ACSUBW: + p.As, p.From.Offset = ACADDIW, -p.From.Offset case ASUB: p.As, p.From.Offset = AADDI, -p.From.Offset case ASUBW: @@ -381,6 +385,10 @@ func InvertBranch(as obj.As) obj.As { return ABEQ case ABNEZ: return ABEQZ + case ACBEQZ: + return ACBNEZ + case ACBNEZ: + return ACBEQZ default: panic("InvertBranch: not a branch") } @@ -394,7 +402,7 @@ func containsCall(sym *obj.LSym) bool { switch p.As { case obj.ACALL: return true - case AJAL, AJALR: + case ACJALR, AJAL, AJALR: if p.From.Type == obj.TYPE_REG && p.From.Reg == REG_LR { return true } @@ -670,7 +678,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { for p := cursym.Func().Text; p != nil; p = p.Link { switch p.As { - case ABEQ, ABEQZ, ABGE, ABGEU, ABGEZ, ABGT, ABGTU, ABGTZ, ABLE, ABLEU, ABLEZ, ABLT, ABLTU, ABLTZ, ABNE, ABNEZ: + case ABEQ, ABEQZ, ABGE, ABGEU, ABGEZ, ABGT, ABGTU, ABGTZ, ABLE, ABLEU, ABLEZ, ABLT, ABLTU, ABLTZ, ABNE, ABNEZ, ACBEQZ, ACBNEZ, ACJ: if p.To.Type != obj.TYPE_BRANCH { ctxt.Diag("%v: instruction with branch-like opcode lacks destination", p) break @@ -752,7 +760,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { // instructions will break everything--don't do it! 
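The new ACSUB/ACSUBW cases in progedit above mirror the existing ASUB/ASUBW rewrite: subtracting a constant becomes adding its negation, so only the add-immediate forms need machine encodings. The transformation in isolation (illustrative sketch, not the assembler's real types):

package main

import "fmt"

// foldSubImm rewrites subtract-immediate to the equivalent add-immediate
// with a negated offset, the same trick progedit applies to ASUB/ASUBW
// and now to the compressed ACSUB/ACSUBW.
func foldSubImm(as string, imm int64) (string, int64) {
	switch as {
	case "CSUB":
		return "CADDI", -imm
	case "CSUBW":
		return "CADDIW", -imm
	}
	return as, imm
}

func main() {
	fmt.Println(foldSubImm("CSUB", 16)) // CADDI -16
}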
 	for p := cursym.Func().Text; p != nil; p = p.Link {
 		switch p.As {
-		case ABEQ, ABEQZ, ABGE, ABGEU, ABGEZ, ABGT, ABGTU, ABGTZ, ABLE, ABLEU, ABLEZ, ABLT, ABLTU, ABLTZ, ABNE, ABNEZ:
+		case ABEQ, ABEQZ, ABGE, ABGEU, ABGEZ, ABGT, ABGTU, ABGTZ, ABLE, ABLEU, ABLEZ, ABLT, ABLTU, ABLTZ, ABNE, ABNEZ, ACBEQZ, ACBNEZ, ACJ:
 			switch p.To.Type {
 			case obj.TYPE_BRANCH:
 				p.To.Type, p.To.Offset = obj.TYPE_CONST, p.To.Target().Pc-p.Pc
@@ -905,7 +913,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, cursym *obj.LSym, newprog obj.ProgA
 		to_done = p
 	} else {
 		// large stack: SP-framesize < stackguard-StackSmall
-		offset := int64(framesize) - abi.StackSmall
+		offset := framesize - abi.StackSmall
 		if framesize > abi.StackBig {
 			// Such a large stack we need to protect against underflow.
 			// The runtime guarantees SP > objabi.StackBig, but
@@ -1042,6 +1050,16 @@ func regVal(r, min, max uint32) uint32 {
 	return r - min
 }
 
+// regCI returns an integer register for use in a compressed instruction.
+func regCI(r uint32) uint32 {
+	return regVal(r, REG_X8, REG_X15)
+}
+
+// regCF returns a float register for use in a compressed instruction.
+func regCF(r uint32) uint32 {
+	return regVal(r, REG_F8, REG_F15)
+}
+
 // regI returns an integer register.
 func regI(r uint32) uint32 {
 	return regVal(r, REG_X0, REG_X31)
@@ -1123,6 +1141,24 @@ func wantImmU(ctxt *obj.Link, ins *instruction, imm int64, nbits uint) {
 	}
 }
 
+func wantScaledImm(ctxt *obj.Link, ins *instruction, imm int64, nbits uint, scale int64, signed bool) {
+	if err := immFits(imm, nbits, signed); err != nil {
+		ctxt.Diag("%v: %v", ins, err)
+		return
+	}
+	if imm%scale != 0 {
+		ctxt.Diag("%v: immediate %d must be a multiple of %d", ins, imm, scale)
+	}
+}
+
+func wantScaledImmI(ctxt *obj.Link, ins *instruction, imm int64, nbits uint, scale int64) {
+	wantScaledImm(ctxt, ins, imm, nbits, scale, true)
+}
+
+func wantScaledImmU(ctxt *obj.Link, ins *instruction, imm int64, nbits uint, scale int64) {
+	wantScaledImm(ctxt, ins, imm, nbits, scale, false)
+}
+
 func wantReg(ctxt *obj.Link, ins *instruction, pos string, descr string, r, min, max uint32) {
 	if r < min || r > max {
 		var suffix string
@@ -1144,11 +1180,23 @@ func wantIntReg(ctxt *obj.Link, ins *instruction, pos string, r uint32) {
 	wantReg(ctxt, ins, pos, "integer", r, REG_X0, REG_X31)
 }
 
+// wantIntPrimeReg checks that r is an integer register that can be used
+// in a prime register field of a compressed instruction.
+func wantIntPrimeReg(ctxt *obj.Link, ins *instruction, pos string, r uint32) {
+	wantReg(ctxt, ins, pos, "integer prime", r, REG_X8, REG_X15)
+}
+
 // wantFloatReg checks that r is a floating-point register.
 func wantFloatReg(ctxt *obj.Link, ins *instruction, pos string, r uint32) {
 	wantReg(ctxt, ins, pos, "float", r, REG_F0, REG_F31)
}
 
+// wantFloatPrimeReg checks that r is a floating-point register that can
+// be used in a prime register field of a compressed instruction.
+func wantFloatPrimeReg(ctxt *obj.Link, ins *instruction, pos string, r uint32) {
+	wantReg(ctxt, ins, pos, "float prime", r, REG_F8, REG_F15)
+}
+
 // wantVectorReg checks that r is a vector register.
func wantVectorReg(ctxt *obj.Link, ins *instruction, pos string, r uint32) { wantReg(ctxt, ins, pos, "vector", r, REG_V0, REG_V31) @@ -1161,6 +1209,206 @@ func wantEvenOffset(ctxt *obj.Link, ins *instruction, offset int64) { } } +func validateCA(ctxt *obj.Link, ins *instruction) { + wantIntPrimeReg(ctxt, ins, "rd", ins.rd) + if ins.rd != ins.rs1 { + ctxt.Diag("%v: rd must be the same as rs1", ins) + } + wantIntPrimeReg(ctxt, ins, "rs2", ins.rs2) +} + +func validateCB(ctxt *obj.Link, ins *instruction) { + if (ins.as == ACSRAI || ins.as == ACSRLI) && ins.imm == 0 { + ctxt.Diag("%v: immediate cannot be zero", ins) + } else if ins.as == ACSRAI || ins.as == ACSRLI { + wantImmU(ctxt, ins, ins.imm, 6) + } else if ins.as == ACBEQZ || ins.as == ACBNEZ { + wantImmI(ctxt, ins, ins.imm, 9) + } else { + wantImmI(ctxt, ins, ins.imm, 6) + } + if ins.as == ACBEQZ || ins.as == ACBNEZ { + wantNoneReg(ctxt, ins, "rd", ins.rd) + wantIntPrimeReg(ctxt, ins, "rs1", ins.rs1) + } else { + wantIntPrimeReg(ctxt, ins, "rd", ins.rd) + if ins.rd != ins.rs1 { + ctxt.Diag("%v: rd must be the same as rs1", ins) + } + } + wantNoneReg(ctxt, ins, "rs2", ins.rs2) +} + +func validateCI(ctxt *obj.Link, ins *instruction) { + if ins.as != ACNOP && ins.rd == REG_X0 { + ctxt.Diag("%v: cannot use register X0 in rd", ins) + } + if ins.as == ACLUI && ins.rd == REG_X2 { + ctxt.Diag("%v: cannot use register SP/X2 in rd", ins) + } + if ins.as != ACLI && ins.as != ACLUI && ins.as != ACLWSP && ins.as != ACLDSP && ins.as != ACFLDSP && ins.rd != ins.rs1 { + ctxt.Diag("%v: rd must be the same as rs1", ins) + } + if ins.as == ACADDI16SP && ins.rd != REG_SP { + ctxt.Diag("%v: rd must be SP/X2", ins) + } + if (ins.as == ACLWSP || ins.as == ACLDSP || ins.as == ACFLDSP) && ins.rs2 != REG_SP { + ctxt.Diag("%v: rs2 must be SP/X2", ins) + } + if (ins.as == ACADDI || ins.as == ACADDI16SP || ins.as == ACLUI || ins.as == ACSLLI) && ins.imm == 0 { + ctxt.Diag("%v: immediate cannot be zero", ins) + } else if ins.as == ACSLLI { + wantImmU(ctxt, ins, ins.imm, 6) + } else if ins.as == ACLWSP { + wantScaledImmU(ctxt, ins, ins.imm, 8, 4) + } else if ins.as == ACLDSP || ins.as == ACFLDSP { + wantScaledImmU(ctxt, ins, ins.imm, 9, 8) + } else if ins.as == ACADDI16SP { + wantScaledImmI(ctxt, ins, ins.imm, 10, 16) + } else { + wantImmI(ctxt, ins, ins.imm, 6) + } + switch ins.as { + case ACNOP, ACADDI, ACADDIW, ACSLLI: + wantIntReg(ctxt, ins, "rd", ins.rd) + wantIntReg(ctxt, ins, "rs1", ins.rs1) + wantNoneReg(ctxt, ins, "rs2", ins.rs2) + case ACLWSP, ACLDSP: + wantIntReg(ctxt, ins, "rd", ins.rd) + wantNoneReg(ctxt, ins, "rs1", ins.rs1) + wantIntReg(ctxt, ins, "rs2", ins.rs2) + case ACFLDSP: + wantFloatReg(ctxt, ins, "rd", ins.rd) + wantNoneReg(ctxt, ins, "rs1", ins.rs1) + wantIntReg(ctxt, ins, "rs2", ins.rs2) + case ACADDI16SP: + wantIntReg(ctxt, ins, "rd", ins.rd) + wantIntReg(ctxt, ins, "rs1", ins.rs1) + wantNoneReg(ctxt, ins, "rs2", ins.rs2) + default: + wantIntReg(ctxt, ins, "rd", ins.rd) + wantNoneReg(ctxt, ins, "rs1", ins.rs1) + wantNoneReg(ctxt, ins, "rs2", ins.rs2) + } +} + +func validateCIW(ctxt *obj.Link, ins *instruction) { + wantScaledImmU(ctxt, ins, ins.imm, 10, 4) + wantIntPrimeReg(ctxt, ins, "rd", ins.rd) + wantIntReg(ctxt, ins, "rs1", ins.rs1) + wantNoneReg(ctxt, ins, "rs2", ins.rs2) + if ins.imm == 0 { + ctxt.Diag("%v: immediate cannot be zero", ins) + } + if ins.rs1 != REG_SP { + ctxt.Diag("%v: SP/X2 must be in rs1", ins) + } +} + +func validateCJ(ctxt *obj.Link, ins *instruction) { + wantEvenOffset(ctxt, ins, ins.imm) + wantImmI(ctxt, ins, 
ins.imm, 12) + if ins.as != ACJ { + wantNoneReg(ctxt, ins, "rd", ins.rd) + wantIntReg(ctxt, ins, "rs1", ins.rs1) + wantIntReg(ctxt, ins, "rs2", ins.rs2) + if ins.rs1 == REG_X0 { + ctxt.Diag("%v: cannot use register X0 in rs1", ins) + } + } +} + +func validateCL(ctxt *obj.Link, ins *instruction) { + if ins.as == ACLW { + wantScaledImmU(ctxt, ins, ins.imm, 7, 4) + } else if ins.as == ACLD || ins.as == ACFLD { + wantScaledImmU(ctxt, ins, ins.imm, 8, 8) + } else { + wantImmI(ctxt, ins, ins.imm, 5) + } + if ins.as == ACFLD { + wantFloatPrimeReg(ctxt, ins, "rd", ins.rd) + } else { + wantIntPrimeReg(ctxt, ins, "rd", ins.rd) + } + wantIntPrimeReg(ctxt, ins, "rs1", ins.rs1) + wantNoneReg(ctxt, ins, "rs2", ins.rs2) +} + +func validateCR(ctxt *obj.Link, ins *instruction) { + switch ins.as { + case ACJR, ACJALR: + wantNoneReg(ctxt, ins, "rd", ins.rd) + wantIntReg(ctxt, ins, "rs1", ins.rs1) + wantNoneReg(ctxt, ins, "rs2", ins.rs2) + if ins.rs1 == REG_X0 { + ctxt.Diag("%v: cannot use register X0 in rs1", ins) + } + case ACMV: + wantIntReg(ctxt, ins, "rd", ins.rd) + wantNoneReg(ctxt, ins, "rs1", ins.rs1) + wantIntReg(ctxt, ins, "rs2", ins.rs2) + if ins.rd == REG_X0 { + ctxt.Diag("%v: cannot use register X0 in rd", ins) + } + if ins.rs2 == REG_X0 { + ctxt.Diag("%v: cannot use register X0 in rs2", ins) + } + case ACEBREAK: + wantNoneReg(ctxt, ins, "rd", ins.rd) + wantNoneReg(ctxt, ins, "rs1", ins.rs1) + wantNoneReg(ctxt, ins, "rs2", ins.rs2) + case ACADD: + wantIntReg(ctxt, ins, "rd", ins.rd) + if ins.rd == REG_X0 { + ctxt.Diag("%v: cannot use register X0 in rd", ins) + } + if ins.rd != ins.rs1 { + ctxt.Diag("%v: rd must be the same as rs1", ins) + } + wantIntReg(ctxt, ins, "rs2", ins.rs2) + if ins.rs2 == REG_X0 { + ctxt.Diag("%v: cannot use register X0 in rs2", ins) + } + } +} + +func validateCS(ctxt *obj.Link, ins *instruction) { + if ins.as == ACSW { + wantScaledImmU(ctxt, ins, ins.imm, 7, 4) + } else if ins.as == ACSD || ins.as == ACFSD { + wantScaledImmU(ctxt, ins, ins.imm, 8, 8) + } else { + wantImmI(ctxt, ins, ins.imm, 5) + } + wantNoneReg(ctxt, ins, "rd", ins.rd) + wantIntPrimeReg(ctxt, ins, "rs1", ins.rs1) + if ins.as == ACFSD { + wantFloatPrimeReg(ctxt, ins, "rs2", ins.rs2) + } else { + wantIntPrimeReg(ctxt, ins, "rs2", ins.rs2) + } +} + +func validateCSS(ctxt *obj.Link, ins *instruction) { + if ins.rd != REG_SP { + ctxt.Diag("%v: rd must be SP/X2", ins) + } + if ins.as == ACSWSP { + wantScaledImmU(ctxt, ins, ins.imm, 8, 4) + } else if ins.as == ACSDSP || ins.as == ACFSDSP { + wantScaledImmU(ctxt, ins, ins.imm, 9, 8) + } else { + wantImmI(ctxt, ins, ins.imm, 6) + } + wantNoneReg(ctxt, ins, "rs1", ins.rs1) + if ins.as == ACFSDSP { + wantFloatReg(ctxt, ins, "rs2", ins.rs2) + } else { + wantIntReg(ctxt, ins, "rs2", ins.rs2) + } +} + func validateRII(ctxt *obj.Link, ins *instruction) { wantIntReg(ctxt, ins, "rd", ins.rd) wantIntReg(ctxt, ins, "rs1", ins.rs1) @@ -1419,15 +1667,247 @@ func validateVsetvl(ctxt *obj.Link, ins *instruction) { func validateRaw(ctxt *obj.Link, ins *instruction) { // Treat the raw value specially as a 32-bit unsigned integer. // Nobody wants to enter negative machine code. - if ins.imm < 0 || 1<<32 <= ins.imm { - ctxt.Diag("%v: immediate %d in raw position cannot be larger than 32 bits", ins.as, ins.imm) - } + wantImmU(ctxt, ins, ins.imm, 32) } -// extractBitAndShift extracts the specified bit from the given immediate, -// before shifting it to the requested position and returning it. 
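Two constraints recur throughout the validators above: compressed load/store offsets must fit their field and be multiples of the access size (the low zero bits are implied, not encoded), and three-bit register fields reach only the eight prime registers x8-x15. Both restated standalone (illustrative sketch, not the assembler's helpers):

package main

import "fmt"

// checkScaledImmU mirrors the wantScaledImmU idea: the offset must be an
// nbits-wide unsigned value and a multiple of scale.
func checkScaledImmU(imm int64, nbits uint, scale int64) error {
	if imm < 0 || imm >= 1<<nbits {
		return fmt.Errorf("immediate %d does not fit in %d bits", imm, nbits)
	}
	if imm%scale != 0 {
		return fmt.Errorf("immediate %d must be a multiple of %d", imm, scale)
	}
	return nil
}

// regPrime maps x8..x15 onto a 3-bit field, the regCI/regVal pattern.
func regPrime(x uint32) (uint32, bool) {
	if x < 8 || x > 15 {
		return 0, false
	}
	return x - 8, true
}

func main() {
	fmt.Println(checkScaledImmU(40, 9, 8)) // <nil>: a valid C.LDSP offset
	fmt.Println(checkScaledImmU(42, 9, 8)) // error: not 8-byte aligned
	fmt.Println(regPrime(10))              // 2 true: x10 encodes as 0b010
	fmt.Println(regPrime(3))               // 0 false: unreachable in 3 bits
}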
-func extractBitAndShift(imm uint32, bit, pos int) uint32 {
-	return ((imm >> bit) & 1) << pos
+// compressedEncoding returns the fixed instruction encoding for a compressed
+// instruction.
+func compressedEncoding(as obj.As) uint32 {
+	enc := encode(as)
+	if enc == nil {
+		panic("compressedEncoding: could not encode instruction")
+	}
+
+	// TODO: this can be removed once encode is reworked to return the
+	// necessary bits.
+	op := uint32(0)
+	switch as {
+	case ACSUB:
+		op = 0b100011<<10 | 0b00<<5
+	case ACXOR:
+		op = 0b100011<<10 | 0b01<<5
+	case ACOR:
+		op = 0b100011<<10 | 0b10<<5
+	case ACAND:
+		op = 0b100011<<10 | 0b11<<5
+	case ACSUBW:
+		op = 0b100111<<10 | 0b00<<5
+	case ACADDW:
+		op = 0b100111<<10 | 0b01<<5
+	case ACBEQZ:
+		op = 0b110 << 13
+	case ACBNEZ:
+		op = 0b111 << 13
+	case ACANDI:
+		op = 0b100<<13 | 0b10<<10
+	case ACSRAI:
+		op = 0b100<<13 | 0b01<<10
+	case ACSRLI:
+		op = 0b100<<13 | 0b00<<10
+	case ACLI:
+		op = 0b010 << 13
+	case ACLUI:
+		op = 0b011 << 13
+	case ACLWSP:
+		op = 0b010 << 13
+	case ACLDSP:
+		op = 0b011 << 13
+	case ACFLDSP:
+		op = 0b001 << 13
+	case ACADDIW:
+		op = 0b001 << 13
+	case ACADDI16SP:
+		op = 0b011 << 13
+	case ACADDI4SPN:
+		op = 0b000 << 13
+	case ACJ:
+		op = 0b101 << 13
+	case ACLW:
+		op = 0b010 << 13
+	case ACLD:
+		op = 0b011 << 13
+	case ACFLD:
+		op = 0b001 << 13
+	case ACJR:
+		op = 0b1000 << 12
+	case ACMV:
+		op = 0b1000 << 12
+	case ACEBREAK:
+		op = 0b1001 << 12
+	case ACJALR:
+		op = 0b1001 << 12
+	case ACADD:
+		op = 0b1001 << 12
+	case ACSW:
+		op = 0b110 << 13
+	case ACSD:
+		op = 0b111 << 13
+	case ACFSD:
+		op = 0b101 << 13
+	case ACSWSP:
+		op = 0b110 << 13
+	case ACSDSP:
+		op = 0b111 << 13
+	case ACFSDSP:
+		op = 0b101 << 13
+	}
+
+	return op | enc.opcode
+}
+
+// encodeBitPattern encodes an immediate value by extracting the specified
+// bit pattern from the given immediate. Each value in the pattern gives the
+// position of one bit to extract; the extracted bits are then concatenated
+// in sequence, most significant first.
+func encodeBitPattern(imm uint32, pattern []int) uint32 {
+	outImm := uint32(0)
+	for _, bit := range pattern {
+		outImm = outImm<<1 | (imm>>bit)&1
+	}
+	return outImm
+}
+
+// encodeCA encodes a compressed arithmetic (CA-type) instruction.
+func encodeCA(ins *instruction) uint32 {
+	return compressedEncoding(ins.as) | regCI(ins.rd)<<7 | regCI(ins.rs2)<<2
+}
+
+// encodeCBImmediate encodes an immediate for a CB-type RISC-V instruction.
+func encodeCBImmediate(imm uint32) uint32 {
+	// Bit order - [8|4:3|7:6|2:1|5]
+	bits := encodeBitPattern(imm, []int{8, 4, 3, 7, 6, 2, 1, 5})
+	return (bits>>5)<<10 | (bits&0x1f)<<2
+}
+
+// encodeCB encodes a compressed branch (CB-type) instruction.
+func encodeCB(ins *instruction) uint32 {
+	imm := uint32(0)
+	if ins.as == ACBEQZ || ins.as == ACBNEZ {
+		imm = immI(ins.as, ins.imm, 9)
+		imm = encodeBitPattern(imm, []int{8, 4, 3, 7, 6, 2, 1, 5})
+	} else if ins.as == ACANDI {
+		imm = immI(ins.as, ins.imm, 6)
+		imm = (imm>>5)<<7 | imm&0x1f
+	} else if ins.as == ACSRAI || ins.as == ACSRLI {
+		imm = immU(ins.as, ins.imm, 6)
+		imm = (imm>>5)<<7 | imm&0x1f
+	}
+	return compressedEncoding(ins.as) | (imm>>5)<<10 | regCI(ins.rs1)<<7 | (imm&0x1f)<<2
+}
+
+// encodeCI encodes a compressed immediate (CI-type) instruction.
+func encodeCI(ins *instruction) uint32 { + imm := uint32(ins.imm) + if ins.as == ACLWSP { + // Bit order [5:2|7:6] + imm = encodeBitPattern(imm, []int{5, 4, 3, 2, 7, 6}) + } else if ins.as == ACLDSP || ins.as == ACFLDSP { + // Bit order [5:3|8:6] + imm = encodeBitPattern(imm, []int{5, 4, 3, 8, 7, 6}) + } else if ins.as == ACADDI16SP { + // Bit order [9|4|6|8:7|5] + imm = encodeBitPattern(imm, []int{9, 4, 6, 8, 7, 5}) + } + rd := uint32(0) + if ins.as == ACFLDSP { + rd = regF(ins.rd) + } else { + rd = regI(ins.rd) + } + return compressedEncoding(ins.as) | ((imm>>5)&0x1)<<12 | rd<<7 | (imm&0x1f)<<2 +} + +// encodeCIW encodes a compressed immediate wide (CIW-type) instruction. +func encodeCIW(ins *instruction) uint32 { + imm := uint32(ins.imm) + if ins.as == ACADDI4SPN { + // Bit order [5:4|9:6|2|3] + imm = encodeBitPattern(imm, []int{5, 4, 9, 8, 7, 6, 2, 3}) + } + return compressedEncoding(ins.as) | imm<<5 | regCI(ins.rd)<<2 +} + +// encodeCJImmediate encodes an immediate for a CJ-type RISC-V instruction. +func encodeCJImmediate(imm uint32) uint32 { + // Bit order - [11|4|9:8|10|6|7|3:1|5] + bits := encodeBitPattern(imm, []int{11, 4, 9, 8, 10, 6, 7, 3, 2, 1, 5}) + return bits << 2 +} + +// encodeCJ encodes a compressed jump (CJ-type) instruction. +func encodeCJ(ins *instruction) uint32 { + return compressedEncoding(ins.as) | encodeCJImmediate(uint32(ins.imm)) +} + +// encodeCL encodes a compressed load (CL-type) instruction. +func encodeCL(ins *instruction) uint32 { + imm := uint32(ins.imm) + if ins.as == ACLW { + // Bit order [5:2|6] + imm = encodeBitPattern(imm, []int{5, 4, 3, 2, 6}) + } else if ins.as == ACLD || ins.as == ACFLD { + // Bit order [5:3|7:6] + imm = encodeBitPattern(imm, []int{5, 4, 3, 7, 6}) + } + rd := uint32(0) + if ins.as == ACFLD { + rd = regCF(ins.rd) + } else { + rd = regCI(ins.rd) + } + return compressedEncoding(ins.as) | (imm>>2)<<10 | regCI(ins.rs1)<<7 | (imm&0x3)<<5 | rd<<2 +} + +// encodeCR encodes a compressed register (CR-type) instruction. +func encodeCR(ins *instruction) uint32 { + rs1, rs2 := uint32(0), uint32(0) + switch ins.as { + case ACJR, ACJALR: + rs1 = regI(ins.rs1) + case ACMV: + rs1, rs2 = regI(ins.rd), regI(ins.rs2) + case ACADD: + rs1, rs2 = regI(ins.rs1), regI(ins.rs2) + } + return compressedEncoding(ins.as) | rs1<<7 | rs2<<2 +} + +// encodeCS encodes a compressed store (CS-type) instruction. +func encodeCS(ins *instruction) uint32 { + imm := uint32(ins.imm) + if ins.as == ACSW { + // Bit order [5:3|2|6] + imm = encodeBitPattern(imm, []int{5, 4, 3, 2, 6}) + } else if ins.as == ACSD || ins.as == ACFSD { + // Bit order [5:3|7:6] + imm = encodeBitPattern(imm, []int{5, 4, 3, 7, 6}) + } + rs2 := uint32(0) + if ins.as == ACFSD { + rs2 = regCF(ins.rs2) + } else { + rs2 = regCI(ins.rs2) + } + return compressedEncoding(ins.as) | ((imm>>2)&0x7)<<10 | regCI(ins.rs1)<<7 | (imm&3)<<5 | rs2<<2 +} + +// encodeCSS encodes a compressed stack-relative store (CSS-type) instruction. +func encodeCSS(ins *instruction) uint32 { + imm := uint32(ins.imm) + if ins.as == ACSWSP { + // Bit order [5:2|7:6] + imm = encodeBitPattern(imm, []int{5, 4, 3, 2, 7, 6}) + } else if ins.as == ACSDSP || ins.as == ACFSDSP { + // Bit order [5:3|8:6] + imm = encodeBitPattern(imm, []int{5, 4, 3, 8, 7, 6}) + } + rs2 := uint32(0) + if ins.as == ACFSDSP { + rs2 = regF(ins.rs2) + } else { + rs2 = regI(ins.rs2) + } + return compressedEncoding(ins.as) | imm<<7 | rs2<<2 } // encodeR encodes an R-type RISC-V instruction. 
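Since every scatter table above is just a permutation of bit positions, decoding is the mirror image of encodeBitPattern, which makes for an easy round-trip sanity check. A standalone sketch using the CJ bit order from the code above:

package main

import "fmt"

// encode walks the pattern MSB-first, pulling the named bit out of imm and
// appending it, the same shape as encodeBitPattern.
func encode(imm uint32, pattern []int) uint32 {
	out := uint32(0)
	for _, bit := range pattern {
		out = out<<1 | (imm>>bit)&1
	}
	return out
}

// decode sends bit i of the packed field back to position pattern[i].
func decode(bits uint32, pattern []int) uint32 {
	imm := uint32(0)
	for i, bit := range pattern {
		imm |= ((bits >> (len(pattern) - 1 - i)) & 1) << bit
	}
	return imm
}

func main() {
	cj := []int{11, 4, 9, 8, 10, 6, 7, 3, 2, 1, 5} // [11|4|9:8|10|6|7|3:1|5]
	imm := uint32(0b101010101010)                  // an even 12-bit offset
	fmt.Println(decode(encode(imm, cj), cj) == imm) // true
}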
@@ -1649,37 +2129,6 @@ func encodeJ(ins *instruction) uint32 { return encodeJImmediate(imm) | rd<<7 | enc.opcode } -// encodeCBImmediate encodes an immediate for a CB-type RISC-V instruction. -func encodeCBImmediate(imm uint32) uint32 { - // Bit order - [8|4:3|7:6|2:1|5] - bits := extractBitAndShift(imm, 8, 7) - bits |= extractBitAndShift(imm, 4, 6) - bits |= extractBitAndShift(imm, 3, 5) - bits |= extractBitAndShift(imm, 7, 4) - bits |= extractBitAndShift(imm, 6, 3) - bits |= extractBitAndShift(imm, 2, 2) - bits |= extractBitAndShift(imm, 1, 1) - bits |= extractBitAndShift(imm, 5, 0) - return (bits>>5)<<10 | (bits&0x1f)<<2 -} - -// encodeCJImmediate encodes an immediate for a CJ-type RISC-V instruction. -func encodeCJImmediate(imm uint32) uint32 { - // Bit order - [11|4|9:8|10|6|7|3:1|5] - bits := extractBitAndShift(imm, 11, 10) - bits |= extractBitAndShift(imm, 4, 9) - bits |= extractBitAndShift(imm, 9, 8) - bits |= extractBitAndShift(imm, 8, 7) - bits |= extractBitAndShift(imm, 10, 6) - bits |= extractBitAndShift(imm, 6, 5) - bits |= extractBitAndShift(imm, 7, 4) - bits |= extractBitAndShift(imm, 3, 3) - bits |= extractBitAndShift(imm, 2, 2) - bits |= extractBitAndShift(imm, 1, 1) - bits |= extractBitAndShift(imm, 5, 0) - return bits << 2 -} - func encodeVset(as obj.As, rs1, rs2, rd uint32) uint32 { enc := encode(as) if enc == nil { @@ -1706,10 +2155,7 @@ func encodeVsetvl(ins *instruction) uint32 { func encodeRawIns(ins *instruction) uint32 { // Treat the raw value specially as a 32-bit unsigned integer. // Nobody wants to enter negative machine code. - if ins.imm < 0 || 1<<32 <= ins.imm { - panic(fmt.Sprintf("immediate %d cannot fit in 32 bits", ins.imm)) - } - return uint32(ins.imm) + return immU(ins.as, ins.imm, 32) } func EncodeBImmediate(imm int64) (int64, error) { @@ -1797,7 +2243,7 @@ func EncodeVectorType(vsew, vlmul, vtail, vmask int64) (int64, error) { type encoding struct { encode func(*instruction) uint32 // encode returns the machine code for an instruction validate func(*obj.Link, *instruction) // validate validates an instruction - length int // length of encoded instruction; 0 for pseudo-ops, 4 otherwise + length int // length of encoded instruction; 0 for pseudo-ops, 2 for compressed instructions, 4 otherwise } var ( @@ -1847,6 +2293,17 @@ var ( uEncoding = encoding{encode: encodeU, validate: validateU, length: 4} jEncoding = encoding{encode: encodeJ, validate: validateJ, length: 4} + // Compressed encodings. + caEncoding = encoding{encode: encodeCA, validate: validateCA, length: 2} + cbEncoding = encoding{encode: encodeCB, validate: validateCB, length: 2} + ciEncoding = encoding{encode: encodeCI, validate: validateCI, length: 2} + ciwEncoding = encoding{encode: encodeCIW, validate: validateCIW, length: 2} + cjEncoding = encoding{encode: encodeCJ, validate: validateCJ, length: 2} + clEncoding = encoding{encode: encodeCL, validate: validateCL, length: 2} + crEncoding = encoding{encode: encodeCR, validate: validateCR, length: 2} + csEncoding = encoding{encode: encodeCS, validate: validateCS, length: 2} + cssEncoding = encoding{encode: encodeCSS, validate: validateCSS, length: 2} + // Encodings for vector configuration setting instruction. 
vsetvliEncoding = encoding{encode: encodeVsetvli, validate: validateVsetvli, length: 4} vsetivliEncoding = encoding{encode: encodeVsetivli, validate: validateVsetivli, length: 4} @@ -2075,6 +2532,63 @@ var instructions = [ALAST & obj.AMask]instructionData{ // 21.7: Double-Precision Floating-Point Classify Instruction AFCLASSD & obj.AMask: {enc: rFIEncoding}, + // + // "C" Extension for Compressed Instructions, Version 2.0 + // + + // 26.3.1: Compressed Stack-Pointer-Based Loads and Stores + ACLWSP & obj.AMask: {enc: ciEncoding}, + ACLDSP & obj.AMask: {enc: ciEncoding}, + ACFLDSP & obj.AMask: {enc: ciEncoding}, + ACSWSP & obj.AMask: {enc: cssEncoding}, + ACSDSP & obj.AMask: {enc: cssEncoding}, + ACFSDSP & obj.AMask: {enc: cssEncoding}, + + // 26.3.2: Compressed Register-Based Loads and Stores + ACLW & obj.AMask: {enc: clEncoding}, + ACLD & obj.AMask: {enc: clEncoding}, + ACFLD & obj.AMask: {enc: clEncoding}, + ACSW & obj.AMask: {enc: csEncoding}, + ACSD & obj.AMask: {enc: csEncoding}, + ACFSD & obj.AMask: {enc: csEncoding}, + + // 26.4: Compressed Control Transfer Instructions + ACJ & obj.AMask: {enc: cjEncoding}, + ACJR & obj.AMask: {enc: crEncoding}, + ACJALR & obj.AMask: {enc: crEncoding}, + ACBEQZ & obj.AMask: {enc: cbEncoding}, + ACBNEZ & obj.AMask: {enc: cbEncoding}, + + // 26.5.1: Compressed Integer Constant-Generation Instructions + ACLI & obj.AMask: {enc: ciEncoding}, + ACLUI & obj.AMask: {enc: ciEncoding}, + + // 26.5.2: Compressed Integer Register-Immediate Operations + ACADDI & obj.AMask: {enc: ciEncoding, ternary: true}, + ACADDIW & obj.AMask: {enc: ciEncoding, ternary: true}, + ACADDI16SP & obj.AMask: {enc: ciEncoding, ternary: true}, + ACADDI4SPN & obj.AMask: {enc: ciwEncoding, ternary: true}, + ACSLLI & obj.AMask: {enc: ciEncoding, ternary: true}, + ACSRLI & obj.AMask: {enc: cbEncoding, ternary: true}, + ACSRAI & obj.AMask: {enc: cbEncoding, ternary: true}, + ACANDI & obj.AMask: {enc: cbEncoding, ternary: true}, + + // 26.5.3: Compressed Integer Register-Register Operations + ACMV & obj.AMask: {enc: crEncoding}, + ACADD & obj.AMask: {enc: crEncoding, immForm: ACADDI, ternary: true}, + ACAND & obj.AMask: {enc: caEncoding, immForm: ACANDI, ternary: true}, + ACOR & obj.AMask: {enc: caEncoding, ternary: true}, + ACXOR & obj.AMask: {enc: caEncoding, ternary: true}, + ACSUB & obj.AMask: {enc: caEncoding, ternary: true}, + ACADDW & obj.AMask: {enc: caEncoding, immForm: ACADDIW, ternary: true}, + ACSUBW & obj.AMask: {enc: caEncoding, ternary: true}, + + // 26.5.5: Compressed NOP Instruction + ACNOP & obj.AMask: {enc: ciEncoding}, + + // 26.5.6: Compressed Breakpoint Instruction + ACEBREAK & obj.AMask: {enc: crEncoding}, + // // "B" Extension for Bit Manipulation, Version 1.0.0 // @@ -2991,7 +3505,7 @@ func (ins *instruction) length() int { func (ins *instruction) validate(ctxt *obj.Link) { enc, err := encodingForAs(ins.as) if err != nil { - ctxt.Diag(err.Error()) + ctxt.Diag("%v", err) return } enc.validate(ctxt, ins) @@ -3026,7 +3540,7 @@ func instructionsForOpImmediate(p *obj.Prog, as obj.As, rs int16) []*instruction low, high, err := Split32BitImmediate(ins.imm) if err != nil { - p.Ctxt.Diag("%v: constant %d too large", p, ins.imm, err) + p.Ctxt.Diag("%v: constant %d too large: %v", p, ins.imm, err) return nil } if high == 0 { @@ -3558,7 +4072,7 @@ func instructionsForProg(p *obj.Prog) []*instruction { } switch ins.as { - case AJAL, AJALR: + case ACJALR, AJAL, AJALR: ins.rd, ins.rs1, ins.rs2 = uint32(p.From.Reg), uint32(p.To.Reg), obj.REG_NONE ins.imm = p.To.Offset @@ -3769,6 
+4283,32 @@ func instructionsForProg(p *obj.Prog) []*instruction { ins.as = AFSGNJND ins.rs1 = uint32(p.From.Reg) + case ACLW, ACLD, ACFLD: + ins.rs1, ins.rs2 = ins.rs2, obj.REG_NONE + + case ACSW, ACSD, ACFSD: + ins.rs1, ins.rd = ins.rd, obj.REG_NONE + ins.imm = p.To.Offset + + case ACSWSP, ACSDSP, ACFSDSP: + ins.imm = p.To.Offset + + case ACANDI, ACSRLI, ACSRAI: + ins.rs1, ins.rd = ins.rd, ins.rs1 + + case ACBEQZ, ACBNEZ: + ins.rd, ins.rs1, ins.rs2 = obj.REG_NONE, uint32(p.From.Reg), obj.REG_NONE + ins.imm = p.To.Offset + + case ACJR: + ins.rd, ins.rs1 = obj.REG_NONE, uint32(p.To.Reg) + + case ACJ: + ins.imm = p.To.Offset + + case ACNOP: + ins.rd, ins.rs1 = REG_ZERO, REG_ZERO + case AROL, AROLW, AROR, ARORW: inss = instructionsForRotate(p, ins) @@ -3837,7 +4377,7 @@ func instructionsForProg(p *obj.Prog) []*instruction { if err != nil { p.Ctxt.Diag("%v: %v", p, err) } - ins.imm = int64(vtype) + ins.imm = vtype if ins.as == AVSETIVLI { if p.From.Type != obj.TYPE_CONST { p.Ctxt.Diag("%v: expected immediate value", p) @@ -4203,7 +4743,8 @@ func assemble(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { Add: p.To.Offset, }) } - case AJALR: + + case ACJALR, AJALR: if p.To.Sym != nil { ctxt.Diag("%v: unexpected AJALR with to symbol", p) } diff --git a/src/cmd/internal/obj/s390x/a.out.go b/src/cmd/internal/obj/s390x/a.out.go index dc715182f5c..caf5ec09358 100644 --- a/src/cmd/internal/obj/s390x/a.out.go +++ b/src/cmd/internal/obj/s390x/a.out.go @@ -156,7 +156,7 @@ var S390XDWARFRegisters = map[int16]int16{} func init() { // f assigns dwarfregisters[from:to by step] = (base):((to-from)/step+base) f := func(from, step, to, base int16) { - for r := int16(from); r <= to; r += step { + for r := from; r <= to; r += step { S390XDWARFRegisters[r] = (r-from)/step + base } } diff --git a/src/cmd/internal/obj/s390x/asmz.go b/src/cmd/internal/obj/s390x/asmz.go index 97de5a4a089..3706bb1b74e 100644 --- a/src/cmd/internal/obj/s390x/asmz.go +++ b/src/cmd/internal/obj/s390x/asmz.go @@ -3130,7 +3130,7 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { case ARISBLG, ARISBLGZ: opcode = op_RISBLG } - zRIE(_f, uint32(opcode), uint32(r1), uint32(r2), 0, uint32(i3), uint32(i4), 0, uint32(i5), asm) + zRIE(_f, opcode, uint32(r1), uint32(r2), 0, uint32(i3), uint32(i4), 0, uint32(i5), asm) case 15: // br/bl (reg) r := p.To.Reg @@ -3183,8 +3183,8 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { } switch p.As { case ASUB: - zRIL(_a, op_LGFI, uint32(regtmp(p)), uint32(v), asm) - zRRF(op_SLGRK, uint32(regtmp(p)), 0, uint32(p.To.Reg), uint32(r), asm) + zRIL(_a, op_LGFI, regtmp(p), uint32(v), asm) + zRRF(op_SLGRK, regtmp(p), 0, uint32(p.To.Reg), uint32(r), asm) case ASUBC: if r != p.To.Reg { zRRE(op_LGR, uint32(p.To.Reg), uint32(r), asm) @@ -3603,7 +3603,7 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { if opcode == op_MVI { opcode = op_MVIY } else { - zRXY(op_LAY, uint32(regtmp(p)), 0, uint32(r), uint32(d), asm) + zRXY(op_LAY, regtmp(p), 0, uint32(r), uint32(d), asm) r = int16(regtmp(p)) d = 0 } diff --git a/src/cmd/internal/obj/s390x/rotate.go b/src/cmd/internal/obj/s390x/rotate.go index 5407c8df110..d3cb44cabff 100644 --- a/src/cmd/internal/obj/s390x/rotate.go +++ b/src/cmd/internal/obj/s390x/rotate.go @@ -67,7 +67,7 @@ func (r RotateParams) RotateLeft(amount uint8) RotateParams { // OutMask provides a mask representing the selected bits. 
 func (r RotateParams) OutMask() uint64 {
 	// Note: z must be unsigned for bootstrap compiler
-	z := uint8(63-r.End+r.Start) & 63 // number of zero bits in mask
+	z := (63 - r.End + r.Start) & 63 // number of zero bits in mask
 	return bits.RotateLeft64(^uint64(0)<<z, -int(r.Start))
 }
diff --git a/src/cmd/internal/obj/x86/asm6.go b/src/cmd/internal/obj/x86/asm6.go
 			if v > 0 {
 				s.Grow(int64(c) + int64(v))
-				fillnop(s.P[c:], int(v))
+				fillnop(s.P[c:], v)
 			}
 			p.Pc = int64(c)
 			c += int32(v)
@@ -3277,7 +3277,7 @@ func (ab *AsmBuf) Put(b []byte) {
 // Literal Z cases usually have "Zlit" in their name (Zlit, Zlitr_m, Zlitm_r).
 func (ab *AsmBuf) PutOpBytesLit(offset int, op *opBytes) {
 	for int(op[offset]) != 0 {
-		ab.Put1(byte(op[offset]))
+		ab.Put1(op[offset])
 		offset++
 	}
 }
diff --git a/src/cmd/internal/obj/x86/asm_test.go b/src/cmd/internal/obj/x86/asm_test.go
index 458a91258a3..11655829838 100644
--- a/src/cmd/internal/obj/x86/asm_test.go
+++ b/src/cmd/internal/obj/x86/asm_test.go
@@ -286,7 +286,7 @@ func TestRegIndex(t *testing.T) {
 		have := regIndex(int16(reg))
 		want := index
 		if have != want {
-			regName := rconv(int(reg))
+			regName := rconv(reg)
 			t.Errorf("regIndex(%s):\nhave: %d\nwant: %d", regName, have, want)
 		}
 
diff --git a/src/cmd/internal/obj/x86/evex.go b/src/cmd/internal/obj/x86/evex.go
index aa93cd8819a..12fe0347046 100644
--- a/src/cmd/internal/obj/x86/evex.go
+++ b/src/cmd/internal/obj/x86/evex.go
@@ -165,7 +165,7 @@ func evexZcase(zcase uint8) bool {
 	return zcase > Zevex_first && zcase < Zevex_last
 }
 
-// evexSuffixBits carries instruction EVEX suffix set flags.
+// evexSuffix carries instruction EVEX suffix set flags.
 //
 // Examples:
 //
diff --git a/src/cmd/internal/objabi/flag.go b/src/cmd/internal/objabi/flag.go
index 8709c4e5cf5..32d71d05751 100644
--- a/src/cmd/internal/objabi/flag.go
+++ b/src/cmd/internal/objabi/flag.go
@@ -85,7 +85,7 @@ var buildID string // filled in by linker
 type versionFlag struct{}
 
 func (versionFlag) IsBoolFlag() bool { return true }
-func (versionFlag) Get() interface{} { return nil }
+func (versionFlag) Get() any         { return nil }
 func (versionFlag) String() string   { return "" }
 func (versionFlag) Set(s string) error {
 	name := os.Args[0]
@@ -148,7 +148,7 @@ func (c *count) Set(s string) error {
 	return nil
 }
 
-func (c *count) Get() interface{} {
+func (c *count) Get() any {
 	return int(*c)
 }
 
@@ -206,8 +206,8 @@ func DecodeArg(arg string) string {
 type debugField struct {
 	name         string
 	help         string
-	concurrentOk bool        // true if this field/flag is compatible with concurrent compilation
-	val          interface{} // *int or *string
+	concurrentOk bool // true if this field/flag is compatible with concurrent compilation
+	val          any  // *int or *string
 }
 
 type DebugFlag struct {
@@ -234,7 +234,7 @@ type DebugSSA func(phase, flag string, val int, valString string) string
 //
 // If debugSSA is non-nil, any debug flags of the form ssa/... will be
 // passed to debugSSA for processing.
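The Get methods above are what make versionFlag and count satisfy flag.Getter; returning any instead of interface{} changes nothing observable. A self-contained imitation of the count flag (hypothetical sketch, not the objabi type, whose Set does more validation):

package main

import (
	"flag"
	"fmt"
)

// count accumulates repeated boolean-style flags, like a -v verbosity knob.
type count int

func (c *count) String() string     { return fmt.Sprint(int(*c)) }
func (c *count) IsBoolFlag() bool   { return true }
func (c *count) Set(s string) error { *c++; return nil }
func (c *count) Get() any           { return int(*c) } // satisfies flag.Getter

func main() {
	var v count
	fs := flag.NewFlagSet("tool", flag.ContinueOnError)
	fs.Var(&v, "v", "increase verbosity")
	fs.Parse([]string{"-v", "-v"})
	fmt.Println(fs.Lookup("v").Value.(flag.Getter).Get()) // 2
}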
-func NewDebugFlag(debug interface{}, debugSSA DebugSSA) *DebugFlag { +func NewDebugFlag(debug any, debugSSA DebugSSA) *DebugFlag { flag := &DebugFlag{ tab: make(map[string]debugField), debugSSA: debugSSA, diff --git a/src/cmd/internal/objabi/pkgspecial.go b/src/cmd/internal/objabi/pkgspecial.go index 94efa6883bd..bf30f0cc7fa 100644 --- a/src/cmd/internal/objabi/pkgspecial.go +++ b/src/cmd/internal/objabi/pkgspecial.go @@ -55,7 +55,6 @@ var runtimePkgs = []string{ "internal/runtime/gc/scan", "internal/runtime/maps", "internal/runtime/math", - "internal/runtime/strconv", "internal/runtime/sys", "internal/runtime/syscall/linux", "internal/runtime/syscall/windows", @@ -71,6 +70,7 @@ var runtimePkgs = []string{ "internal/goexperiment", "internal/goos", "internal/profilerecord", + "internal/strconv", "internal/stringslite", } diff --git a/src/cmd/internal/objfile/elf.go b/src/cmd/internal/objfile/elf.go index 8923290cffe..6988cea9362 100644 --- a/src/cmd/internal/objfile/elf.go +++ b/src/cmd/internal/objfile/elf.go @@ -64,40 +64,26 @@ func (f *elfFile) symbols() ([]Sym, error) { return syms, nil } -func (f *elfFile) pcln() (textStart uint64, symtab, pclntab []byte, err error) { +func (f *elfFile) pcln() (textStart uint64, pclntab []byte, err error) { if sect := f.elf.Section(".text"); sect != nil { textStart = sect.Addr } - sect := f.elf.Section(".gosymtab") - if sect == nil { - // try .data.rel.ro.gosymtab, for PIE binaries - sect = f.elf.Section(".data.rel.ro.gosymtab") - } - if sect != nil { - if symtab, err = sect.Data(); err != nil { - return 0, nil, nil, err - } - } else { - // if both sections failed, try the symbol - symtab = f.symbolData("runtime.symtab", "runtime.esymtab") - } - - sect = f.elf.Section(".gopclntab") + sect := f.elf.Section(".gopclntab") if sect == nil { // try .data.rel.ro.gopclntab, for PIE binaries sect = f.elf.Section(".data.rel.ro.gopclntab") } if sect != nil { if pclntab, err = sect.Data(); err != nil { - return 0, nil, nil, err + return 0, nil, err } } else { // if both sections failed, try the symbol pclntab = f.symbolData("runtime.pclntab", "runtime.epclntab") } - return textStart, symtab, pclntab, nil + return textStart, pclntab, nil } func (f *elfFile) text() (textStart uint64, text []byte, err error) { diff --git a/src/cmd/internal/objfile/goobj.go b/src/cmd/internal/objfile/goobj.go index e8b8b522516..ec852d0669f 100644 --- a/src/cmd/internal/objfile/goobj.go +++ b/src/cmd/internal/objfile/goobj.go @@ -221,13 +221,13 @@ func (f *goobjFile) symbols() ([]Sym, error) { return syms, nil } -func (f *goobjFile) pcln() (textStart uint64, symtab, pclntab []byte, err error) { +func (f *goobjFile) pcln() (textStart uint64, pclntab []byte, err error) { // Should never be called. We implement Liner below, callers // should use that instead. - return 0, nil, nil, fmt.Errorf("pcln not available in go object file") + return 0, nil, fmt.Errorf("pcln not available in go object file") } -// Find returns the file name, line, and function data for the given pc. +// PCToLine returns the file name, line, and function data for the given pc. // Returns "",0,nil if unknown. // This function implements the Liner interface in preference to pcln() above. func (f *goobjFile) PCToLine(pc uint64) (string, int, *gosym.Func) { @@ -240,7 +240,7 @@ func (f *goobjFile) PCToLine(pc uint64) (string, int, *gosym.Func) { // We don't need the data for non-hashed symbols, yet. 
panic("not supported") } - i := uint32(s.SymIdx + uint32(r.NSym()+r.NHashed64def())) + i := s.SymIdx + uint32(r.NSym()+r.NHashed64def()) return r.BytesAt(r.DataOff(i), r.DataSize(i)) } @@ -325,7 +325,7 @@ func readvarint(p *[]byte) uint32 { // We treat the whole object file as the text section. func (f *goobjFile) text() (textStart uint64, text []byte, err error) { text = make([]byte, f.goobj.Size) - _, err = f.f.ReadAt(text, int64(f.goobj.Offset)) + _, err = f.f.ReadAt(text, f.goobj.Offset) return } diff --git a/src/cmd/internal/objfile/macho.go b/src/cmd/internal/objfile/macho.go index 8258145f26f..eaf665faee9 100644 --- a/src/cmd/internal/objfile/macho.go +++ b/src/cmd/internal/objfile/macho.go @@ -79,21 +79,16 @@ func (f *machoFile) symbols() ([]Sym, error) { return syms, nil } -func (f *machoFile) pcln() (textStart uint64, symtab, pclntab []byte, err error) { +func (f *machoFile) pcln() (textStart uint64, pclntab []byte, err error) { if sect := f.macho.Section("__text"); sect != nil { textStart = sect.Addr } - if sect := f.macho.Section("__gosymtab"); sect != nil { - if symtab, err = sect.Data(); err != nil { - return 0, nil, nil, err - } - } if sect := f.macho.Section("__gopclntab"); sect != nil { if pclntab, err = sect.Data(); err != nil { - return 0, nil, nil, err + return 0, nil, err } } - return textStart, symtab, pclntab, nil + return textStart, pclntab, nil } func (f *machoFile) text() (textStart uint64, text []byte, err error) { diff --git a/src/cmd/internal/objfile/objfile.go b/src/cmd/internal/objfile/objfile.go index ed9aae280e5..32e06dfd991 100644 --- a/src/cmd/internal/objfile/objfile.go +++ b/src/cmd/internal/objfile/objfile.go @@ -18,7 +18,7 @@ import ( type rawFile interface { symbols() (syms []Sym, err error) - pcln() (textStart uint64, symtab, pclntab []byte, err error) + pcln() (textStart uint64, pclntab []byte, err error) text() (textStart uint64, text []byte, err error) goarch() string loadAddress() (uint64, error) @@ -141,7 +141,7 @@ func (e *Entry) PCLineTable() (Liner, error) { return pcln, nil } // Otherwise, read the pcln tables and build a Liner out of that. - textStart, symtab, pclntab, err := e.raw.pcln() + textStart, pclntab, err := e.raw.pcln() if err != nil { return nil, err } @@ -154,7 +154,7 @@ func (e *Entry) PCLineTable() (Liner, error) { } } } - return gosym.NewTable(symtab, gosym.NewLineTable(pclntab, textStart)) + return gosym.NewTable(nil, gosym.NewLineTable(pclntab, textStart)) } func (e *Entry) Text() (uint64, []byte, error) { diff --git a/src/cmd/internal/objfile/pe.go b/src/cmd/internal/objfile/pe.go index c5c08264a9c..e94821298f1 100644 --- a/src/cmd/internal/objfile/pe.go +++ b/src/cmd/internal/objfile/pe.go @@ -90,10 +90,10 @@ func (f *peFile) symbols() ([]Sym, error) { return syms, nil } -func (f *peFile) pcln() (textStart uint64, symtab, pclntab []byte, err error) { +func (f *peFile) pcln() (textStart uint64, pclntab []byte, err error) { imageBase, err := f.imageBase() if err != nil { - return 0, nil, nil, err + return 0, nil, err } if sect := f.pe.Section(".text"); sect != nil { @@ -104,17 +104,10 @@ func (f *peFile) pcln() (textStart uint64, symtab, pclntab []byte, err error) { // TODO: Remove code looking for the old symbols when we no longer care about 1.3. var err2 error if pclntab, err2 = loadPETable(f.pe, "pclntab", "epclntab"); err2 != nil { - return 0, nil, nil, err + return 0, nil, err } } - if symtab, err = loadPETable(f.pe, "runtime.symtab", "runtime.esymtab"); err != nil { - // Same as above. 
- var err2 error - if symtab, err2 = loadPETable(f.pe, "symtab", "esymtab"); err2 != nil { - return 0, nil, nil, err - } - } - return textStart, symtab, pclntab, nil + return textStart, pclntab, nil } func (f *peFile) text() (textStart uint64, text []byte, err error) { diff --git a/src/cmd/internal/objfile/plan9obj.go b/src/cmd/internal/objfile/plan9obj.go index c91970762c7..edd40230cec 100644 --- a/src/cmd/internal/objfile/plan9obj.go +++ b/src/cmd/internal/objfile/plan9obj.go @@ -71,24 +71,17 @@ func (f *plan9File) symbols() ([]Sym, error) { return syms, nil } -func (f *plan9File) pcln() (textStart uint64, symtab, pclntab []byte, err error) { +func (f *plan9File) pcln() (textStart uint64, pclntab []byte, err error) { textStart = f.plan9.LoadAddress + f.plan9.HdrSize if pclntab, err = loadPlan9Table(f.plan9, "runtime.pclntab", "runtime.epclntab"); err != nil { // We didn't find the symbols, so look for the names used in 1.3 and earlier. // TODO: Remove code looking for the old symbols when we no longer care about 1.3. var err2 error if pclntab, err2 = loadPlan9Table(f.plan9, "pclntab", "epclntab"); err2 != nil { - return 0, nil, nil, err + return 0, nil, err } } - if symtab, err = loadPlan9Table(f.plan9, "runtime.symtab", "runtime.esymtab"); err != nil { - // Same as above. - var err2 error - if symtab, err2 = loadPlan9Table(f.plan9, "symtab", "esymtab"); err2 != nil { - return 0, nil, nil, err - } - } - return textStart, symtab, pclntab, nil + return textStart, pclntab, nil } func (f *plan9File) text() (textStart uint64, text []byte, err error) { diff --git a/src/cmd/internal/objfile/xcoff.go b/src/cmd/internal/objfile/xcoff.go index d6df4db8f01..85928621f18 100644 --- a/src/cmd/internal/objfile/xcoff.go +++ b/src/cmd/internal/objfile/xcoff.go @@ -44,7 +44,7 @@ func (f *xcoffFile) symbols() ([]Sym, error) { case N_DEBUG: sym.Code = '?' 
default: - if s.SectionNumber < 0 || len(f.xcoff.Sections) < int(s.SectionNumber) { + if s.SectionNumber < 0 || len(f.xcoff.Sections) < s.SectionNumber { return nil, fmt.Errorf("invalid section number in symbol table") } sect := f.xcoff.Sections[s.SectionNumber-1] @@ -87,15 +87,14 @@ func (f *xcoffFile) symbols() ([]Sym, error) { return syms, nil } -func (f *xcoffFile) pcln() (textStart uint64, symtab, pclntab []byte, err error) { +func (f *xcoffFile) pcln() (textStart uint64, pclntab []byte, err error) { if sect := f.xcoff.Section(".text"); sect != nil { textStart = sect.VirtualAddress } if pclntab, err = loadXCOFFTable(f.xcoff, "runtime.pclntab", "runtime.epclntab"); err != nil { - return 0, nil, nil, err + return 0, nil, err } - symtab, _ = loadXCOFFTable(f.xcoff, "runtime.symtab", "runtime.esymtab") // ignore error, this symbol is not useful anyway - return textStart, symtab, pclntab, nil + return textStart, pclntab, nil } func (f *xcoffFile) text() (textStart uint64, text []byte, err error) { @@ -116,7 +115,7 @@ func findXCOFFSymbol(f *xcoff.File, name string) (*xcoff.Symbol, error) { if s.SectionNumber <= 0 { return nil, fmt.Errorf("symbol %s: invalid section number %d", name, s.SectionNumber) } - if len(f.Sections) < int(s.SectionNumber) { + if len(f.Sections) < s.SectionNumber { return nil, fmt.Errorf("symbol %s: section number %d is larger than max %d", name, s.SectionNumber, len(f.Sections)) } return s, nil diff --git a/src/cmd/internal/robustio/robustio_darwin.go b/src/cmd/internal/robustio/robustio_darwin.go index 99fd8ebc2ff..69ea2479308 100644 --- a/src/cmd/internal/robustio/robustio_darwin.go +++ b/src/cmd/internal/robustio/robustio_darwin.go @@ -13,9 +13,6 @@ const errFileNotFound = syscall.ENOENT // isEphemeralError returns true if err may be resolved by waiting. func isEphemeralError(err error) bool { - var errno syscall.Errno - if errors.As(err, &errno) { - return errno == errFileNotFound - } - return false + errno, ok := errors.AsType[syscall.Errno](err) + return ok && errno == errFileNotFound } diff --git a/src/cmd/internal/robustio/robustio_flaky.go b/src/cmd/internal/robustio/robustio_flaky.go index c56e36ca624..ec1a2daea65 100644 --- a/src/cmd/internal/robustio/robustio_flaky.go +++ b/src/cmd/internal/robustio/robustio_flaky.go @@ -31,8 +31,7 @@ func retry(f func() (err error, mayRetry bool)) error { return err } - var errno syscall.Errno - if errors.As(err, &errno) && (lowestErrno == 0 || errno < lowestErrno) { + if errno, ok := errors.AsType[syscall.Errno](err); ok && (lowestErrno == 0 || errno < lowestErrno) { bestErr = err lowestErrno = errno } else if bestErr == nil { diff --git a/src/cmd/internal/robustio/robustio_windows.go b/src/cmd/internal/robustio/robustio_windows.go index 687dcb66f83..ad46ec5cfeb 100644 --- a/src/cmd/internal/robustio/robustio_windows.go +++ b/src/cmd/internal/robustio/robustio_windows.go @@ -14,8 +14,7 @@ const errFileNotFound = syscall.ERROR_FILE_NOT_FOUND // isEphemeralError returns true if err may be resolved by waiting. 
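The robustio and script rewrites in this stretch all follow one pattern: errors.AsType[T](err) returns the matched value and an ok bool, replacing the declare-then-pass-a-pointer dance of errors.As. Side by side (sketch assuming the errors.AsType generic helper these files now rely on):

package main

import (
	"errors"
	"fmt"
	"io/fs"
)

func main() {
	err := fmt.Errorf("wrap: %w", &fs.PathError{Op: "open", Path: "f", Err: fs.ErrNotExist})

	// Before: declare a destination, pass its address.
	var pe *fs.PathError
	if errors.As(err, &pe) {
		fmt.Println("as:", pe.Op)
	}

	// After: one expression, scoped to the if statement.
	if pe, ok := errors.AsType[*fs.PathError](err); ok {
		fmt.Println("astype:", pe.Op)
	}
}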
func isEphemeralError(err error) bool { - var errno syscall.Errno - if errors.As(err, &errno) { + if errno, ok := errors.AsType[syscall.Errno](err); ok { switch errno { case syscall.ERROR_ACCESS_DENIED, syscall.ERROR_FILE_NOT_FOUND, diff --git a/src/cmd/internal/script/engine.go b/src/cmd/internal/script/engine.go index eb9344f6e2a..05b9433d3ec 100644 --- a/src/cmd/internal/script/engine.go +++ b/src/cmd/internal/script/engine.go @@ -55,6 +55,8 @@ import ( "errors" "fmt" "io" + "maps" + "slices" "sort" "strings" "time" @@ -185,7 +187,7 @@ func (e *Engine) Execute(s *State, file string, script *bufio.Reader, log io.Wri var lineno int lineErr := func(err error) error { - if errors.As(err, new(*CommandError)) { + if _, ok := errors.AsType[*CommandError](err); ok { return err } return fmt.Errorf("%s:%d: %w", file, lineno, err) @@ -283,7 +285,7 @@ func (e *Engine) Execute(s *State, file string, script *bufio.Reader, log io.Wri // Run the command. err = e.runCommand(s, cmd, impl) if err != nil { - if stop := (stopError{}); errors.As(err, &stop) { + if stop, ok := errors.AsType[stopError](err); ok { // Since the 'stop' command halts execution of the entire script, // log its message separately from the section in which it appears. err = endSection(true) @@ -518,7 +520,7 @@ func (e *Engine) conditionsActive(s *State, conds []condition) (bool, error) { if ok { impl = e.Conds[prefix] if impl == nil { - return false, fmt.Errorf("unknown condition prefix %q", prefix) + return false, fmt.Errorf("unknown condition prefix %q; known: %v", prefix, slices.Collect(maps.Keys(e.Conds))) } if !impl.Usage().Prefix { return false, fmt.Errorf("condition %q cannot be used with a suffix", prefix) @@ -607,13 +609,13 @@ func checkStatus(cmd *command, err error) error { return nil } - if s := (stopError{}); errors.As(err, &s) { + if _, ok := errors.AsType[stopError](err); ok { // This error originated in the Stop command. // Propagate it as-is. return cmdError(cmd, err) } - if w := (waitError{}); errors.As(err, &w) { + if _, ok := errors.AsType[waitError](err); ok { // This error was surfaced from a background process by a call to Wait. // Add a call frame for Wait itself, but ignore its "want" field. 
// (Wait itself cannot fail to wait on commands or else it would leak diff --git a/src/cmd/internal/script/scripttest/scripttest.go b/src/cmd/internal/script/scripttest/scripttest.go index bace662a672..349201fd188 100644 --- a/src/cmd/internal/script/scripttest/scripttest.go +++ b/src/cmd/internal/script/scripttest/scripttest.go @@ -89,7 +89,7 @@ func Run(t testing.TB, e *script.Engine, s *script.State, filename string, testS return e.Execute(s, filename, bufio.NewReader(testScript), log) }() - if skip := (skipError{}); errors.As(err, &skip) { + if skip, ok := errors.AsType[skipError](err); ok { if skip.msg == "" { t.Skip("SKIP") } else { diff --git a/src/cmd/internal/test2json/test2json.go b/src/cmd/internal/test2json/test2json.go index d08ef389f82..f28051e1771 100644 --- a/src/cmd/internal/test2json/test2json.go +++ b/src/cmd/internal/test2json/test2json.go @@ -38,6 +38,7 @@ type event struct { FailedBuild string `json:",omitempty"` Key string `json:",omitempty"` Value string `json:",omitempty"` + Path string `json:",omitempty"` } // textBytes is a hack to get JSON to emit a []byte as a string @@ -180,6 +181,7 @@ var ( []byte("=== FAIL "), []byte("=== SKIP "), []byte("=== ATTR "), + []byte("=== ARTIFACTS "), } reports = [][]byte{ @@ -251,7 +253,6 @@ func (c *Converter) handleInputLine(line []byte) { // "=== RUN " // "=== PAUSE " // "=== CONT " - actionColon := false origLine := line ok := false indent := 0 @@ -273,7 +274,6 @@ func (c *Converter) handleInputLine(line []byte) { } for _, magic := range reports { if bytes.HasPrefix(line, magic) { - actionColon = true ok = true break } @@ -296,16 +296,11 @@ func (c *Converter) handleInputLine(line []byte) { return } - // Parse out action and test name. - i := 0 - if actionColon { - i = bytes.IndexByte(line, ':') + 1 - } - if i == 0 { - i = len(updates[0]) - } - action := strings.ToLower(strings.TrimSuffix(strings.TrimSpace(string(line[4:i])), ":")) - name := strings.TrimSpace(string(line[i:])) + // Parse out action and test name from "=== ACTION: Name". 
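The Cut-based parse that follows replaces index arithmetic over the magic-prefix tables; every "=== ACTION[:] Name" header reduces to an action plus a remainder, and ARTIFACTS then splits the remainder once more into name and path. Equivalent standalone logic (sketch):

package main

import (
	"fmt"
	"strings"
)

// parseUpdate mirrors the new parse of "=== ACTION[:] Name" lines.
func parseUpdate(line string) (action, name string) {
	action, name, _ = strings.Cut(strings.TrimPrefix(line, "=== "), " ")
	action = strings.ToLower(strings.TrimSuffix(action, ":"))
	return action, strings.TrimSpace(name)
}

func main() {
	fmt.Println(parseUpdate("=== RUN   TestFoo")) // run TestFoo
	action, rest := parseUpdate("=== ARTIFACTS TestFoo /tmp/arts")
	name, path, _ := strings.Cut(rest, " ")
	fmt.Println(action, name, path) // artifacts TestFoo /tmp/arts
}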
+ action, name, _ := strings.Cut(string(line[len("=== "):]), " ") + action = strings.TrimSuffix(action, ":") + action = strings.ToLower(action) + name = strings.TrimSpace(name) e := &event{Action: action} if line[0] == '-' { // PASS or FAIL report @@ -336,7 +331,10 @@ func (c *Converter) handleInputLine(line []byte) { c.output.write(origLine) return } - if action == "attr" { + switch action { + case "artifacts": + name, e.Path, _ = strings.Cut(name, " ") + case "attr": var rest string name, rest, _ = strings.Cut(name, " ") e.Key, e.Value, _ = strings.Cut(rest, " ") diff --git a/src/cmd/internal/testdir/testdir_test.go b/src/cmd/internal/testdir/testdir_test.go index 9731d9e91f3..c7060898778 100644 --- a/src/cmd/internal/testdir/testdir_test.go +++ b/src/cmd/internal/testdir/testdir_test.go @@ -1488,7 +1488,7 @@ var ( // "\s*,\s*` matches " , " // second reMatchCheck matches "`SUB`" // ")*)" closes started groups; "*" means that there might be other elements in the comma-separated list - rxAsmPlatform = regexp.MustCompile(`(\w+)(/[\w.]+)?(/\w*)?\s*:\s*(` + reMatchCheck + `(?:\s*,\s*` + reMatchCheck + `)*)`) + rxAsmPlatform = regexp.MustCompile(`(\w+)(/[\w.]+)?(/\w*)?\s*:\s*(` + reMatchCheck + `(?:\s+` + reMatchCheck + `)*)`) // Regexp to extract a single opcoded check rxAsmCheck = regexp.MustCompile(reMatchCheck) @@ -1701,6 +1701,9 @@ func (t test) asmCheck(outStr string, fn string, env buildEnv, fullops map[strin } srcFileLine, asm := matches[1], matches[2] + // Replace tabs with single spaces to make matches easier to write. + asm = strings.ReplaceAll(asm, "\t", " ") + // Associate the original file/line information to the current // function in the output; it will be useful to dump it in case // of error. @@ -1752,11 +1755,11 @@ func (t test) asmCheck(outStr string, fn string, env buildEnv, fullops map[strin } if o.negative { - fmt.Fprintf(&errbuf, "%s:%d: %s: wrong opcode found: %q\n", t.goFileName(), o.line, env, o.opcode.String()) + fmt.Fprintf(&errbuf, "%s:%d: %s: wrong opcode found: %#q\n", t.goFileName(), o.line, env, o.opcode.String()) } else if o.expected > 0 { - fmt.Fprintf(&errbuf, "%s:%d: %s: wrong number of opcodes: %q\n", t.goFileName(), o.line, env, o.opcode.String()) + fmt.Fprintf(&errbuf, "%s:%d: %s: wrong number of opcodes: %#q\n", t.goFileName(), o.line, env, o.opcode.String()) } else { - fmt.Fprintf(&errbuf, "%s:%d: %s: opcode not found: %q\n", t.goFileName(), o.line, env, o.opcode.String()) + fmt.Fprintf(&errbuf, "%s:%d: %s: opcode not found: %#q\n", t.goFileName(), o.line, env, o.opcode.String()) } } return errors.New(errbuf.String()) diff --git a/src/cmd/link/internal/amd64/asm.go b/src/cmd/link/internal/amd64/asm.go index 5424de800ca..d27342f23fb 100644 --- a/src/cmd/link/internal/amd64/asm.go +++ b/src/cmd/link/internal/amd64/asm.go @@ -407,7 +407,7 @@ func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loade } else { ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ)) } - rela.AddAddrPlus(target.Arch, targ, int64(r.Add())) + rela.AddAddrPlus(target.Arch, targ, r.Add()) // Not mark r done here. So we still apply it statically, // so in the file content we'll also have the right offset // to the relocation target. 
 // to the relocation target. So it can be examined statically
diff --git a/src/cmd/link/internal/arm/asm.go b/src/cmd/link/internal/arm/asm.go
index a113196d10c..248ec648bcc 100644
--- a/src/cmd/link/internal/arm/asm.go
+++ b/src/cmd/link/internal/arm/asm.go
@@ -400,7 +400,7 @@ func trampoline(ctxt *ld.Link, ldr *loader.Loader, ri int, rs, s loader.Sym) {
 		for i := 0; ; i++ {
 			oName := ldr.SymName(rs)
 			name := oName + fmt.Sprintf("%+d-tramp%d", offset, i)
-			tramp = ldr.LookupOrCreateSym(name, int(ldr.SymVersion(rs)))
+			tramp = ldr.LookupOrCreateSym(name, ldr.SymVersion(rs))
 			ldr.SetAttrReachable(tramp, true)
 			if ldr.SymType(tramp) == sym.SDYNIMPORT {
 				// don't reuse trampoline defined in other module
diff --git a/src/cmd/link/internal/arm64/asm.go b/src/cmd/link/internal/arm64/asm.go
index 8d8ea8ac542..7c4546fb179 100644
--- a/src/cmd/link/internal/arm64/asm.go
+++ b/src/cmd/link/internal/arm64/asm.go
@@ -457,7 +457,7 @@ func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loade
 		} else {
 			ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ))
 		}
-		rela.AddAddrPlus(target.Arch, targ, int64(r.Add()))
+		rela.AddAddrPlus(target.Arch, targ, r.Add())
 		// Not mark r done here. So we still apply it statically,
 		// so in the file content we'll also have the right offset
 		// to the relocation target. So it can be examined statically
@@ -876,7 +876,7 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade
 			if r.Siz() == 8 {
 				val = r.Add()
 			} else if target.IsBigEndian() {
-				val = int64(uint32(val)) | int64(r.Add())<<32
+				val = int64(uint32(val)) | r.Add()<<32
 			} else {
 				val = val>>32<<32 | int64(uint32(r.Add()))
 			}
@@ -972,13 +972,13 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade
 		// R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
 		// turn ADRP to MOVZ
-		o0 = 0xd2a00000 | uint32(o0&0x1f) | (uint32((v>>16)&0xffff) << 5)
+		o0 = 0xd2a00000 | o0&0x1f | (uint32((v>>16)&0xffff) << 5)
 		// R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
 		// turn LD64 to MOVK
 		if v&3 != 0 {
 			ldr.Errorf(s, "invalid address: %x for relocation type: R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", v)
 		}
-		o1 = 0xf2800000 | uint32(o1&0x1f) | (uint32(v&0xffff) << 5)
+		o1 = 0xf2800000 | o1&0x1f | (uint32(v&0xffff) << 5)
 
 		// when laid out, the instruction order must always be o0, o1.
 		if target.IsBigEndian() {
@@ -1027,25 +1027,35 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade
 		}
 
 	case objabi.R_ARM64_PCREL:
+		// When targeting Windows, the instruction immediate field is cleared
+		// before applying relocations, as it contains the offset as bytes
+		// instead of pages. It has already been accounted for in loadpe.Load
+		// by adjusting r.Add().
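The Windows wrinkle just described plays out in the case below: PE objects arrive with a byte offset already sitting in the adrp/add immediate fields, so those bits must be cleared before the recomputed value is or-ed in, whereas ELF and Mach-O inputs come in zeroed. The adrp patch in isolation (sketch mirroring the masks and shifts used below):

package main

import "fmt"

// patchADRP installs the page-relative immediate of an adrp instruction,
// first dropping any stale immlo/immhi bits left behind by PE input.
func patchADRP(insn uint32, t int64, pe bool) uint32 {
	if pe {
		insn &^= 3<<29 | 0x7ffff<<5 // clear the byte offset loadpe left in place
	}
	immlo := uint32((t >> 12) & 3)
	immhi := uint32((t >> 14) & 0x7ffff)
	return insn | immlo<<29 | immhi<<5
}

func main() {
	fmt.Printf("%#08x\n", patchADRP(0x90000010, 0x4000, true)) // 0x90000030
}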
if (val>>24)&0x9f == 0x90 { - // R_AARCH64_ADR_PREL_PG_HI21 + // ELF R_AARCH64_ADR_PREL_PG_HI21, or Mach-O ARM64_RELOC_PAGE21, or PE IMAGE_REL_ARM64_PAGEBASE_REL21 // patch instruction: adrp t := ldr.SymAddr(rs) + r.Add() - ((ldr.SymValue(s) + int64(r.Off())) &^ 0xfff) if t >= 1<<32 || t < -1<<32 { ldr.Errorf(s, "program too large, address relocation distance = %d", t) } o0 := (uint32((t>>12)&3) << 29) | (uint32((t>>12>>2)&0x7ffff) << 5) + if target.IsWindows() { + val &^= 3<<29 | 0x7ffff<<5 + } return val | int64(o0), noExtReloc, isOk } else if (val>>24)&0x9f == 0x91 { - // ELF R_AARCH64_ADD_ABS_LO12_NC or Mach-O ARM64_RELOC_PAGEOFF12 + // ELF R_AARCH64_ADD_ABS_LO12_NC, or Mach-O ARM64_RELOC_PAGEOFF12, or PE IMAGE_REL_ARM64_PAGEOFFSET_12A // patch instruction: add t := ldr.SymAddr(rs) + r.Add() - ((ldr.SymValue(s) + int64(r.Off())) &^ 0xfff) o1 := uint32(t&0xfff) << 10 + if target.IsWindows() { + val &^= 0xfff << 10 + } return val | int64(o1), noExtReloc, isOk } else if (val>>24)&0x3b == 0x39 { - // Mach-O ARM64_RELOC_PAGEOFF12 + // Mach-O ARM64_RELOC_PAGEOFF12 or PE IMAGE_REL_ARM64_PAGEOFFSET_12L // patch ldr/str(b/h/w/d/q) (integer or vector) instructions, which have different scaling factors. - // Mach-O uses same relocation type for them. + // Mach-O and PE use same relocation type for them. shift := uint32(val) >> 30 if shift == 0 && (val>>20)&0x048 == 0x048 { // 128-bit vector load shift = 4 @@ -1055,6 +1065,9 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade ldr.Errorf(s, "invalid address: %x for relocation type: ARM64_RELOC_PAGEOFF12", t) } o1 := (uint32(t&0xfff) >> shift) << 10 + if target.IsWindows() { + val &^= 0xfff << 10 + } return val | int64(o1), noExtReloc, isOk } else { ldr.Errorf(s, "unsupported instruction for %x R_ARM64_PCREL", val) @@ -1372,7 +1385,7 @@ func trampoline(ctxt *ld.Link, ldr *loader.Loader, ri int, rs, s loader.Sym) { for i := 0; ; i++ { oName := ldr.SymName(rs) name := oName + fmt.Sprintf("%+x-tramp%d", r.Add(), i) - tramp = ldr.LookupOrCreateSym(name, int(ldr.SymVersion(rs))) + tramp = ldr.LookupOrCreateSym(name, ldr.SymVersion(rs)) ldr.SetAttrReachable(tramp, true) if ldr.SymType(tramp) == sym.SDYNIMPORT { // don't reuse trampoline defined in other module diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go index a49cae0d952..e7e202fc1f6 100644 --- a/src/cmd/link/internal/ld/data.go +++ b/src/cmd/link/internal/ld/data.go @@ -427,7 +427,7 @@ func (st *relocSymState) relocsym(s loader.Sym, P []byte) { } case objabi.R_DWTXTADDR_U1, objabi.R_DWTXTADDR_U2, objabi.R_DWTXTADDR_U3, objabi.R_DWTXTADDR_U4: unit := ldr.SymUnit(rs) - if idx, ok := unit.Addrs[sym.LoaderSym(rs)]; ok { + if idx, ok := unit.Addrs[rs]; ok { o = int64(idx) } else { st.err.Errorf(s, "missing .debug_addr index relocation target %s", ldr.SymName(rs)) @@ -512,7 +512,7 @@ func (st *relocSymState) relocsym(s loader.Sym, P []byte) { case objabi.R_ADDRCUOFF: // debug_range and debug_loc elements use this relocation type to get an // offset from the start of the compile unit. - o = ldr.SymValue(rs) + r.Add() - ldr.SymValue(loader.Sym(ldr.SymUnit(rs).Textp[0])) + o = ldr.SymValue(rs) + r.Add() - ldr.SymValue(ldr.SymUnit(rs).Textp[0]) // r.Sym() can be 0 when CALL $(constant) is transformed from absolute PC to relative PC call. 
case objabi.R_GOTPCREL: @@ -560,7 +560,7 @@ func (st *relocSymState) relocsym(s loader.Sym, P []byte) { if rst != sym.SHOSTOBJ { o += int64(uint64(ldr.SymValue(rs)) - ldr.SymSect(rs).Vaddr) } - o -= int64(off) // relative to section offset, not symbol + o -= off // relative to section offset, not symbol } } else { o += int64(siz) @@ -698,7 +698,7 @@ func extreloc(ctxt *Link, ldr *loader.Loader, s loader.Sym, r loader.Reloc) (loa return rr, false } rs := r.Sym() - rr.Xsym = loader.Sym(ldr.SymSect(rs).Sym) + rr.Xsym = ldr.SymSect(rs).Sym rr.Xadd = r.Add() + ldr.SymValue(rs) - int64(ldr.SymSect(rs).Vaddr) // r.Sym() can be 0 when CALL $(constant) is transformed from absolute PC to relative PC call. @@ -913,6 +913,23 @@ func windynrelocsym(ctxt *Link, rel *loader.SymbolBuilder, s loader.Sym) error { rel.AddPCRelPlus(ctxt.Arch, targ, 0) rel.AddUint8(0x90) rel.AddUint8(0x90) + case sys.ARM64: + // adrp x16, addr + rel.AddUint32(ctxt.Arch, 0x90000010) + r, _ := rel.AddRel(objabi.R_ARM64_PCREL) + r.SetOff(int32(rel.Size() - 4)) + r.SetSiz(4) + r.SetSym(targ) + + // ldr x17, [x16, ] + rel.AddUint32(ctxt.Arch, 0xf9400211) + r, _ = rel.AddRel(objabi.R_ARM64_PCREL) + r.SetOff(int32(rel.Size() - 4)) + r.SetSiz(4) + r.SetSym(targ) + + // br x17 + rel.AddUint32(ctxt.Arch, 0xd61f0220) } } else if tplt >= 0 { if su == nil { @@ -1564,7 +1581,7 @@ func (state *dodataState) makeRelroForSharedLib(target *Link) { // the relro data. isRelro = true } - case sym.SGOFUNC: + case sym.SGOFUNC, sym.SPCLNTAB: // The only SGOFUNC symbols that contain relocations are .stkobj, // and their relocations are of type objabi.R_ADDROFF, // which always get resolved during linking. @@ -1611,9 +1628,9 @@ type dodataState struct { // Link context ctxt *Link // Data symbols bucketed by type. - data [sym.SXREF][]loader.Sym + data [sym.SFirstUnallocated][]loader.Sym // Max alignment for each flavor of data symbol. - dataMaxAlign [sym.SXREF]int32 + dataMaxAlign [sym.SFirstUnallocated]int32 // Overridden sym type symGroupType []sym.SymKind // Current data size so far. 
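As a sketch of the bucketing pattern behind the dodataState change above (made-up kinds, not the linker's types): sym.SFirstUnallocated names the first symbol kind that dodata does not assign to a section, so it doubles as the length of the per-kind bucket arrays:

package main

import "fmt"

type symKind int

const (
	kindRODATA symKind = iota
	kindDATA
	kindBSS
	kindFirstUnallocated // kinds from here on get no section
)

func main() {
	data := make([][]string, kindFirstUnallocated) // one bucket per allocatable kind
	syms := []struct {
		name string
		kind symKind
	}{{"a", kindDATA}, {"b", kindRODATA}, {"c", kindFirstUnallocated}}
	for _, s := range syms {
		if s.kind >= kindFirstUnallocated {
			continue // mirrors the st >= sym.SFirstUnallocated skip
		}
		data[s.kind] = append(data[s.kind], s.name)
	}
	fmt.Println(data) // [[b] [a] []]
}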
@@ -1670,7 +1687,7 @@ func (ctxt *Link) dodata(symGroupType []sym.SymKind) { st := state.symType(s) - if st <= sym.STEXTFIPSEND || st >= sym.SXREF { + if st <= sym.STEXTEND || st >= sym.SFirstUnallocated { continue } state.data[st] = append(state.data[st], s) @@ -2102,6 +2119,21 @@ func (state *dodataState) allocateDataSections(ctxt *Link) { } } + /* gopclntab */ + sect = state.allocateNamedSectionAndAssignSyms(segro, ".gopclntab", sym.SPCLNTAB, sym.SRODATA, 04) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.pclntab", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.pcheader", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.funcnametab", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.cutab", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.filetab", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.pctab", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.functab", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.epclntab", 0), sect) + setCarrierSize(sym.SPCLNTAB, int64(sect.Length)) + if ctxt.HeadType == objabi.Haix { + xcoffUpdateOuterSize(ctxt, int64(sect.Length), sym.SPCLNTAB) + } + /* read-only ELF, Mach-O sections */ state.allocateSingleSymSections(segro, sym.SELFROSECT, sym.SRODATA, 04) @@ -2221,37 +2253,17 @@ func (state *dodataState) allocateDataSections(ctxt *Link) { state.checkdatsize(sym.SITABLINK) sect.Length = uint64(state.datsize) - sect.Vaddr - /* gosymtab */ - sect = state.allocateNamedSectionAndAssignSyms(seg, genrelrosecname(".gosymtab"), sym.SSYMTAB, sym.SRODATA, relroSecPerm) - ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.symtab", 0), sect) - ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.esymtab", 0), sect) - - /* gopclntab */ - sect = state.allocateNamedSectionAndAssignSyms(seg, genrelrosecname(".gopclntab"), sym.SPCLNTAB, sym.SRODATA, relroSecPerm) - ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.pclntab", 0), sect) - ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.pcheader", 0), sect) - ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.funcnametab", 0), sect) - ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.cutab", 0), sect) - ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.filetab", 0), sect) - ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.pctab", 0), sect) - ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.functab", 0), sect) - ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.epclntab", 0), sect) - setCarrierSize(sym.SPCLNTAB, int64(sect.Length)) - if ctxt.HeadType == objabi.Haix { - xcoffUpdateOuterSize(ctxt, int64(sect.Length), sym.SPCLNTAB) - } - // 6g uses 4-byte relocation offsets, so the entire segment must fit in 32 bits. if state.datsize != int64(uint32(state.datsize)) { Errorf("read-only data segment too large: %d", state.datsize) } siz := 0 - for symn := sym.SELFRXSECT; symn < sym.SXREF; symn++ { + for symn := sym.SELFRXSECT; symn < sym.SFirstUnallocated; symn++ { siz += len(state.data[symn]) } ctxt.datap = make([]loader.Sym, 0, siz) - for symn := sym.SELFRXSECT; symn < sym.SXREF; symn++ { + for symn := sym.SELFRXSECT; symn < sym.SFirstUnallocated; symn++ { ctxt.datap = append(ctxt.datap, state.data[symn]...) 
} } @@ -2268,7 +2280,7 @@ func (state *dodataState) allocateDwarfSections(ctxt *Link) { s := dwarfp[i].secSym() sect := state.allocateNamedDataSection(&Segdwarf, ldr.SymName(s), []sym.SymKind{}, 04) ldr.SetSymSect(s, sect) - sect.Sym = sym.LoaderSym(s) + sect.Sym = s curType := ldr.SymType(s) state.setSymType(s, sym.SRODATA) ldr.SetSymValue(s, int64(uint64(state.datsize)-sect.Vaddr)) @@ -2835,9 +2847,6 @@ func (ctxt *Link) address() []*sym.Segment { // will be such that the last page of the text segment will be // mapped twice, once r-x and once starting out rw- and, after // relocation processing, changed to r--. - // - // Ideally the last page of the text segment would not be - // writable even for this short period. va = uint64(Rnd(int64(va), *FlagRound)) order = append(order, &Segrodata) @@ -2971,7 +2980,6 @@ func (ctxt *Link) address() []*sym.Segment { ldr := ctxt.loader var ( rodata = ldr.SymSect(ldr.LookupOrCreateSym("runtime.rodata", 0)) - symtab = ldr.SymSect(ldr.LookupOrCreateSym("runtime.symtab", 0)) pclntab = ldr.SymSect(ldr.LookupOrCreateSym("runtime.pclntab", 0)) types = ldr.SymSect(ldr.LookupOrCreateSym("runtime.types", 0)) ) @@ -3051,8 +3059,6 @@ func (ctxt *Link) address() []*sym.Segment { ctxt.xdefine("runtime.egcbss", sym.SRODATA, ldr.SymAddr(s)+ldr.SymSize(s)) ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.egcbss", 0), ldr.SymSect(s)) - ctxt.xdefine("runtime.symtab", sym.SRODATA, int64(symtab.Vaddr)) - ctxt.xdefine("runtime.esymtab", sym.SRODATA, int64(symtab.Vaddr+symtab.Length)) ctxt.xdefine("runtime.pclntab", sym.SRODATA, int64(pclntab.Vaddr)) ctxt.defineInternal("runtime.pcheader", sym.SRODATA) ctxt.defineInternal("runtime.funcnametab", sym.SRODATA) diff --git a/src/cmd/link/internal/ld/dwarf.go b/src/cmd/link/internal/ld/dwarf.go index 0003938ef2e..31de34aff45 100644 --- a/src/cmd/link/internal/ld/dwarf.go +++ b/src/cmd/link/internal/ld/dwarf.go @@ -100,7 +100,7 @@ func (c dwctxt) AddString(s dwarf.Sym, v string) { dsu.Addstring(v) } -func (c dwctxt) AddAddress(s dwarf.Sym, data interface{}, value int64) { +func (c dwctxt) AddAddress(s dwarf.Sym, data any, value int64) { ds := loader.Sym(s.(dwSym)) dsu := c.ldr.MakeSymbolUpdater(ds) if value != 0 { @@ -110,7 +110,7 @@ func (c dwctxt) AddAddress(s dwarf.Sym, data interface{}, value int64) { dsu.AddAddrPlus(c.arch, tgtds, value) } -func (c dwctxt) AddCURelativeAddress(s dwarf.Sym, data interface{}, value int64) { +func (c dwctxt) AddCURelativeAddress(s dwarf.Sym, data any, value int64) { ds := loader.Sym(s.(dwSym)) dsu := c.ldr.MakeSymbolUpdater(ds) if value != 0 { @@ -120,7 +120,7 @@ func (c dwctxt) AddCURelativeAddress(s dwarf.Sym, data interface{}, value int64) dsu.AddCURelativeAddrPlus(c.arch, tgtds, value) } -func (c dwctxt) AddSectionOffset(s dwarf.Sym, size int, t interface{}, ofs int64) { +func (c dwctxt) AddSectionOffset(s dwarf.Sym, size int, t any, ofs int64) { ds := loader.Sym(s.(dwSym)) dsu := c.ldr.MakeSymbolUpdater(ds) tds := loader.Sym(t.(dwSym)) @@ -132,7 +132,7 @@ func (c dwctxt) AddSectionOffset(s dwarf.Sym, size int, t interface{}, ofs int64 dsu.AddSymRef(c.arch, tds, ofs, objabi.R_ADDROFF, size) } -func (c dwctxt) AddDWARFAddrSectionOffset(s dwarf.Sym, t interface{}, ofs int64) { +func (c dwctxt) AddDWARFAddrSectionOffset(s dwarf.Sym, t any, ofs int64) { size := 4 if isDwarf64(c.linkctxt) { size = 8 @@ -148,14 +148,14 @@ func (c dwctxt) AddDWARFAddrSectionOffset(s dwarf.Sym, t interface{}, ofs int64) dsu.AddSymRef(c.arch, tds, ofs, objabi.R_DWARFSECREF, size) } -func (c dwctxt) 
AddIndirectTextRef(s dwarf.Sym, t interface{}) { +func (c dwctxt) AddIndirectTextRef(s dwarf.Sym, t any) { ds := loader.Sym(s.(dwSym)) dsu := c.ldr.MakeSymbolUpdater(ds) tds := loader.Sym(t.(dwSym)) dsu.AddSymRef(c.arch, tds, 0, objabi.R_DWTXTADDR_U4, 4) } -func (c dwctxt) Logf(format string, args ...interface{}) { +func (c dwctxt) Logf(format string, args ...any) { c.linkctxt.Logf(format, args...) } @@ -239,7 +239,7 @@ var dwtypes dwarf.DWDie // up all attrs in a single large table, then store indices into the // table in the DIE. This would allow us to common up storage for // attributes that are shared by many DIEs (ex: byte size of N). -func newattr(die *dwarf.DWDie, attr uint16, cls int, value int64, data interface{}) { +func newattr(die *dwarf.DWDie, attr uint16, cls int, value int64, data any) { a := new(dwarf.DWAttr) a.Link = die.Attr die.Attr = a @@ -1011,7 +1011,7 @@ func (d *dwctxt) addDwarfAddrRef(sb *loader.SymbolBuilder, t loader.Sym) { func (d *dwctxt) calcCompUnitRanges() { var prevUnit *sym.CompilationUnit for _, s := range d.linkctxt.Textp { - sym := loader.Sym(s) + sym := s fi := d.ldr.FuncInfo(sym) if !fi.Valid() { @@ -1033,7 +1033,7 @@ func (d *dwctxt) calcCompUnitRanges() { // only create boundaries between symbols from // different units. sval := d.ldr.SymValue(sym) - u0val := d.ldr.SymValue(loader.Sym(unit.Textp[0])) + u0val := d.ldr.SymValue(unit.Textp[0]) if prevUnit != unit { unit.PCs = append(unit.PCs, dwarf.Range{Start: sval - u0val}) prevUnit = unit @@ -1339,7 +1339,7 @@ func (d *dwctxt) writelines(unit *sym.CompilationUnit, lineProlog loader.Sym) [] // Output the state machine for each function remaining. for _, s := range unit.Textp { - fnSym := loader.Sym(s) + fnSym := s _, _, _, lines := d.ldr.GetFuncDwarfAuxSyms(fnSym) // Chain the line symbol onto the list. @@ -1399,7 +1399,7 @@ func (d *dwctxt) writepcranges(unit *sym.CompilationUnit, base loader.Sym, pcs [ // Collect up the ranges for functions in the unit. rsize := uint64(rsu.Size()) for _, ls := range unit.RangeSyms { - s := loader.Sym(ls) + s := ls syms = append(syms, s) rsize += uint64(d.ldr.SymSize(s)) } @@ -1501,7 +1501,7 @@ func (d *dwctxt) writeframes(fs loader.Sym) dwarfSecInfo { var deltaBuf []byte pcsp := obj.NewPCIter(uint32(d.arch.MinLC)) for _, s := range d.linkctxt.Textp { - fn := loader.Sym(s) + fn := s fi := d.ldr.FuncInfo(fn) if !fi.Valid() { continue @@ -1646,7 +1646,7 @@ func (d *dwctxt) writeUnitInfo(u *sym.CompilationUnit, abbrevsym loader.Sym, add cu = append(cu, u.AbsFnDIEs...) cu = append(cu, u.FuncDIEs...) if u.Consts != 0 { - cu = append(cu, loader.Sym(u.Consts)) + cu = append(cu, u.Consts) } cu = append(cu, u.VarDIEs...) 
var cusize int64 @@ -1772,7 +1772,7 @@ func (d *dwctxt) assignDebugAddrSlot(unit *sym.CompilationUnit, fnsym loader.Sym if unit.Addrs == nil { unit.Addrs = make(map[sym.LoaderSym]uint32) } - if _, ok := unit.Addrs[sym.LoaderSym(rsym)]; ok { + if _, ok := unit.Addrs[rsym]; ok { // already present, no work needed } else { sl := len(unit.Addrs) @@ -1781,7 +1781,7 @@ func (d *dwctxt) assignDebugAddrSlot(unit *sym.CompilationUnit, fnsym loader.Sym if sl > lim { log.Fatalf("internal error: %s relocation overflow on infosym for %s", rt.String(), d.ldr.SymName(fnsym)) } - unit.Addrs[sym.LoaderSym(rsym)] = uint32(sl) + unit.Addrs[rsym] = uint32(sl) sb.AddAddrPlus(d.arch, rsym, 0) data := sb.Data() if d.arch.PtrSize == 4 { @@ -1805,11 +1805,11 @@ func (d *dwctxt) dwarfVisitFunction(fnSym loader.Sym, unit *sym.CompilationUnit) } d.ldr.SetAttrNotInSymbolTable(infosym, true) d.ldr.SetAttrReachable(infosym, true) - unit.FuncDIEs = append(unit.FuncDIEs, sym.LoaderSym(infosym)) + unit.FuncDIEs = append(unit.FuncDIEs, infosym) if rangesym != 0 { d.ldr.SetAttrNotInSymbolTable(rangesym, true) d.ldr.SetAttrReachable(rangesym, true) - unit.RangeSyms = append(unit.RangeSyms, sym.LoaderSym(rangesym)) + unit.RangeSyms = append(unit.RangeSyms, rangesym) } // Walk the relocations of the subprogram DIE symbol to discover @@ -1836,7 +1836,7 @@ func (d *dwctxt) dwarfVisitFunction(fnSym loader.Sym, unit *sym.CompilationUnit) if !d.ldr.AttrOnList(rsym) { // abstract function d.ldr.SetAttrOnList(rsym, true) - unit.AbsFnDIEs = append(unit.AbsFnDIEs, sym.LoaderSym(rsym)) + unit.AbsFnDIEs = append(unit.AbsFnDIEs, rsym) d.importInfoSymbol(rsym) } continue @@ -1942,7 +1942,7 @@ func dwarfGenerateDebugInfo(ctxt *Link) { for _, unit := range lib.Units { // We drop the constants into the first CU. if consts != 0 { - unit.Consts = sym.LoaderSym(consts) + unit.Consts = consts d.importInfoSymbol(consts) consts = 0 } @@ -2008,7 +2008,7 @@ func dwarfGenerateDebugInfo(ctxt *Link) { // abstract functions, visit range symbols. Note that // Textp has been dead-code-eliminated already. for _, s := range unit.Textp { - d.dwarfVisitFunction(loader.Sym(s), unit) + d.dwarfVisitFunction(s, unit) } } } @@ -2073,7 +2073,7 @@ func dwarfGenerateDebugInfo(ctxt *Link) { if varDIE != 0 { unit := d.ldr.SymUnit(idx) d.defgotype(gt) - unit.VarDIEs = append(unit.VarDIEs, sym.LoaderSym(varDIE)) + unit.VarDIEs = append(unit.VarDIEs, varDIE) } } @@ -2125,7 +2125,7 @@ type dwUnitSyms struct { func (d *dwctxt) dwUnitPortion(u *sym.CompilationUnit, abbrevsym loader.Sym, us *dwUnitSyms) { if u.DWInfo.Abbrev != dwarf.DW_ABRV_COMPUNIT_TEXTLESS { us.linesyms = d.writelines(u, us.lineProlog) - base := loader.Sym(u.Textp[0]) + base := u.Textp[0] if buildcfg.Experiment.Dwarf5 { d.writedebugaddr(u, us.addrsym) } @@ -2145,7 +2145,7 @@ func (d *dwctxt) writedebugaddr(unit *sym.CompilationUnit, debugaddr loader.Sym) var dsyms []loader.Sym for _, s := range unit.Textp { - fnSym := loader.Sym(s) + fnSym := s // NB: this looks at SDWARFFCN; it will need to also look // at range and loc when they get there. 
infosym, locsym, rangessym, _ := d.ldr.GetFuncDwarfAuxSyms(fnSym) @@ -2305,8 +2305,8 @@ func (d *dwctxt) dwarfGenerateDebugSyms() { len += uint64(d.ldr.SymSize(hdrsym)) su := d.ldr.MakeSymbolUpdater(hdrsym) if isDwarf64(d.linkctxt) { - len -= 12 // sub size of length field - su.SetUint(d.arch, 4, uint64(len)) // 4 because of 0XFFFFFFFF + len -= 12 // sub size of length field + su.SetUint(d.arch, 4, len) // 4 because of 0XFFFFFFFF } else { len -= 4 // subtract size of length field su.SetUint32(d.arch, 0, uint32(len)) @@ -2377,7 +2377,7 @@ func (d *dwctxt) dwarfGenerateDebugSyms() { func (d *dwctxt) collectUnitLocs(u *sym.CompilationUnit) []loader.Sym { syms := []loader.Sym{} for _, fn := range u.FuncDIEs { - relocs := d.ldr.Relocs(loader.Sym(fn)) + relocs := d.ldr.Relocs(fn) for i := 0; i < relocs.Count(); i++ { reloc := relocs.At(i) if reloc.Type() != objabi.R_DWARFSECREF { @@ -2510,7 +2510,7 @@ func dwarfcompress(ctxt *Link) { ldr.SetSymValue(s, int64(pos)) sect := ldr.SymSect(s) if sect != prevSect { - sect.Vaddr = uint64(pos) + sect.Vaddr = pos prevSect = sect } if ldr.SubSym(s) != 0 { diff --git a/src/cmd/link/internal/ld/elf.go b/src/cmd/link/internal/ld/elf.go index 6ff1d943833..62736ab94bd 100644 --- a/src/cmd/link/internal/ld/elf.go +++ b/src/cmd/link/internal/ld/elf.go @@ -61,80 +61,24 @@ import ( * */ -/* - * ELF definitions that are independent of architecture or word size. - */ +// ELF definitions that are independent of architecture or word size. -/* - * Note header. The ".note" section contains an array of notes. Each - * begins with this header, aligned to a word boundary. Immediately - * following the note header is n_namesz bytes of name, padded to the - * next word boundary. Then comes n_descsz bytes of descriptor, again - * padded to a word boundary. The values of n_namesz and n_descsz do - * not include the padding. - */ -type elfNote struct { - nNamesz uint32 - nDescsz uint32 - nType uint32 -} +// We use the 64-bit data structures on both 32- and 64-bit machines +// in order to write the code just once. The 64-bit data structure is +// written in the 32-bit format on the 32-bit machines. -/* For accessing the fields of r_info. */ - -/* For constructing r_info from field values. */ - -/* - * Relocation types. - */ -const ( - ARM_MAGIC_TRAMP_NUMBER = 0x5c000003 -) - -/* - * Symbol table entries. - */ - -/* For accessing the fields of st_info. */ - -/* For constructing st_info from field values. */ - -/* For accessing the fields of st_other. */ - -/* - * ELF header. - */ +// ElfEhdr is the ELF file header. type ElfEhdr elf.Header64 -/* - * Section header. - */ +// ElfShdr is an ELF section entry, plus the section index. type ElfShdr struct { elf.Section64 shnum elf.SectionIndex } -/* - * Program header. - */ +// ElfPhdr is the ELF program, or segment, header. type ElfPhdr elf.ProgHeader -/* For accessing the fields of r_info. */ - -/* For constructing r_info from field values. */ - -/* - * Symbol table entries. - */ - -/* For accessing the fields of st_info. */ - -/* For constructing st_info from field values. */ - -/* For accessing the fields of st_other. */ - -/* - * Go linker interface - */ const ( ELF64HDRSIZE = 64 ELF64PHDRSIZE = 56 @@ -149,32 +93,13 @@ const ( ELF32RELSIZE = 8 ) -/* - * The interface uses the 64-bit structures always, - * to avoid code duplication. The writers know how to - * marshal a 32-bit representation from the 64-bit structure. 
- */ - var elfstrdat, elfshstrdat []byte -/* - * Total amount of space to reserve at the start of the file - * for Header, PHeaders, SHeaders, and interp. - * May waste some. - * On FreeBSD, cannot be larger than a page. - */ -const ( - ELFRESERVE = 4096 -) -/* - * We use the 64-bit data structures on both 32- and 64-bit machines - * in order to write the code just once. The 64-bit data structure is - * written in the 32-bit format on the 32-bit machines. - */ -const ( - NSECT = 400 -) +// ELFRESERVE is the total amount of space to reserve at the +// start of the file for Header, PHeaders, SHeaders, and interp. +// May waste some space. +// On FreeBSD, cannot be larger than a page. +const ELFRESERVE = 4096 var ( Nelfsym = 1 @@ -185,8 +110,8 @@ var ( elfRelType string ehdr ElfEhdr - phdr [NSECT]*ElfPhdr - shdr [NSECT]*ElfShdr + phdr = make([]*ElfPhdr, 0, 8) + shdr = make([]*ElfShdr, 0, 64) interp string ) @@ -227,10 +152,8 @@ var nelfstr int var buildinfo []byte -/* -Initialize the global variable that describes the ELF header. It will be updated as -we write section and prog headers. -*/ +// Elfinit initializes the global ehdr variable that holds the ELF header. +// It will be updated as we write section and program headers. func Elfinit(ctxt *Link) { ctxt.IsELF = true @@ -244,28 +167,28 @@ func Elfinit(ctxt *Link) { // 64-bit architectures case sys.PPC64, sys.S390X: if ctxt.Arch.ByteOrder == binary.BigEndian && ctxt.HeadType != objabi.Hopenbsd { - ehdr.Flags = 1 /* Version 1 ABI */ + ehdr.Flags = 1 // Version 1 ABI } else { - ehdr.Flags = 2 /* Version 2 ABI */ + ehdr.Flags = 2 // Version 2 ABI } fallthrough case sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS64, sys.RISCV64: if ctxt.Arch.Family == sys.MIPS64 { - ehdr.Flags = 0x20000004 /* MIPS 3 CPIC */ + ehdr.Flags = 0x20000004 // MIPS 3 CPIC } if ctxt.Arch.Family == sys.Loong64 { - ehdr.Flags = 0x43 /* DOUBLE_FLOAT, OBJABI_V1 */ + ehdr.Flags = 0x43 // DOUBLE_FLOAT, OBJABI_V1 } if ctxt.Arch.Family == sys.RISCV64 { - ehdr.Flags = 0x4 /* RISCV Float ABI Double */ + ehdr.Flags = 0x4 // RISCV Float ABI Double } elf64 = true - ehdr.Phoff = ELF64HDRSIZE /* Must be ELF64HDRSIZE: first PHdr must follow ELF header */ - ehdr.Shoff = ELF64HDRSIZE /* Will move as we add PHeaders */ - ehdr.Ehsize = ELF64HDRSIZE /* Must be ELF64HDRSIZE */ - ehdr.Phentsize = ELF64PHDRSIZE /* Must be ELF64PHDRSIZE */ - ehdr.Shentsize = ELF64SHDRSIZE /* Must be ELF64SHDRSIZE */ + ehdr.Phoff = ELF64HDRSIZE // Must be ELF64HDRSIZE: first PHdr must follow ELF header + ehdr.Shoff = ELF64HDRSIZE // Will move as we add PHeaders + ehdr.Ehsize = ELF64HDRSIZE // Must be ELF64HDRSIZE + ehdr.Phentsize = ELF64PHDRSIZE // Must be ELF64PHDRSIZE + ehdr.Shentsize = ELF64SHDRSIZE // Must be ELF64SHDRSIZE // 32-bit architectures case sys.ARM, sys.MIPS: @@ -283,22 +206,21 @@ func Elfinit(ctxt *Link) { ehdr.Flags = 0x5000002 // has entry point, Version5 EABI } } else if ctxt.Arch.Family == sys.MIPS { - ehdr.Flags = 0x50001004 /* MIPS 32 CPIC O32*/ + ehdr.Flags = 0x50001004 // MIPS 32 CPIC O32 } fallthrough default: ehdr.Phoff = ELF32HDRSIZE - /* Must be ELF32HDRSIZE: first PHdr must follow ELF header */ - ehdr.Shoff = ELF32HDRSIZE /* Will move as we add PHeaders */ - ehdr.Ehsize = ELF32HDRSIZE /* Must be ELF32HDRSIZE */ - ehdr.Phentsize = ELF32PHDRSIZE /* Must be ELF32PHDRSIZE */ - ehdr.Shentsize = ELF32SHDRSIZE /* Must be ELF32SHDRSIZE */ + // Must be ELF32HDRSIZE: first PHdr must follow ELF header + ehdr.Shoff = ELF32HDRSIZE // Will move as we add PHeaders + ehdr.Ehsize = ELF32HDRSIZE // Must
be ELF32HDRSIZE + ehdr.Phentsize = ELF32PHDRSIZE // Must be ELF32PHDRSIZE + ehdr.Shentsize = ELF32SHDRSIZE // Must be ELF32SHDRSIZE } } -// Make sure PT_LOAD is aligned properly and -// that there is no gap, -// correct ELF loaders will do this implicitly, +// fixElfPhdr makes sure PT_LOAD is aligned properly and that there is no gap. +// Correct ELF loaders will do this implicitly, // but buggy ELF loaders like the one in some // versions of QEMU and UPX won't. func fixElfPhdr(e *ElfPhdr) { @@ -343,8 +265,8 @@ func elf32phdr(out *OutBuf, e *ElfPhdr) { func elf64shdr(out *OutBuf, e *ElfShdr) { out.Write32(e.Name) - out.Write32(uint32(e.Type)) - out.Write64(uint64(e.Flags)) + out.Write32(e.Type) + out.Write64(e.Flags) out.Write64(e.Addr) out.Write64(e.Off) out.Write64(e.Size) @@ -356,7 +278,7 @@ func elf32shdr(out *OutBuf, e *ElfShdr) { out.Write32(e.Name) - out.Write32(uint32(e.Type)) + out.Write32(e.Type) out.Write32(uint32(e.Flags)) out.Write32(uint32(e.Addr)) out.Write32(uint32(e.Off)) @@ -408,12 +330,8 @@ func elfwritephdrs(out *OutBuf) uint32 { func newElfPhdr() *ElfPhdr { e := new(ElfPhdr) - if ehdr.Phnum >= NSECT { - Errorf("too many phdrs") - } else { - phdr[ehdr.Phnum] = e - ehdr.Phnum++ - } + phdr = append(phdr, e) + ehdr.Phnum++ if elf64 { ehdr.Shoff += ELF64PHDRSIZE } else { @@ -426,13 +344,8 @@ func newElfShdr(name int64) *ElfShdr { e := new(ElfShdr) e.Name = uint32(name) e.shnum = elf.SectionIndex(ehdr.Shnum) - if ehdr.Shnum >= NSECT { - Errorf("too many shdrs") - } else { - shdr[ehdr.Shnum] = e - ehdr.Shnum++ - } - + shdr = append(shdr, e) + ehdr.Shnum++ return e } @@ -442,9 +355,9 @@ func getElfEhdr() *ElfEhdr { func elf64writehdr(out *OutBuf) uint32 { out.Write(ehdr.Ident[:]) - out.Write16(uint16(ehdr.Type)) - out.Write16(uint16(ehdr.Machine)) - out.Write32(uint32(ehdr.Version)) + out.Write16(ehdr.Type) + out.Write16(ehdr.Machine) + out.Write32(ehdr.Version) out.Write64(ehdr.Entry) out.Write64(ehdr.Phoff) out.Write64(ehdr.Shoff) @@ -460,9 +373,9 @@ func elf32writehdr(out *OutBuf) uint32 { out.Write(ehdr.Ident[:]) - out.Write16(uint16(ehdr.Type)) - out.Write16(uint16(ehdr.Machine)) - out.Write32(uint32(ehdr.Version)) + out.Write16(ehdr.Type) + out.Write16(ehdr.Machine) + out.Write32(ehdr.Version) out.Write32(uint32(ehdr.Entry)) out.Write32(uint32(ehdr.Phoff)) out.Write32(uint32(ehdr.Shoff)) @@ -483,7 +396,10 @@ func elfwritehdr(out *OutBuf) uint32 { return elf32writehdr(out) } -/* Taken directly from the definition document for ELF64. */ +// elfhash is the dynamic symbol hash function. +// This is taken directly from the definition document for ELF, +// except that instead of finishing with "h &^= g" we just write +// "h &= 0xfffffff", which results in the same value. func elfhash(name string) uint32 { var h uint32 for i := 0; i < len(name); i++ { @@ -578,6 +494,8 @@ func elfMipsAbiFlags(sh *ElfShdr, startva uint64, resoff uint64) int { return n } +// elfWriteMipsAbiFlags writes the .MIPS.abiflags section.
+// // Layout is given by this C definition: // // typedef struct @@ -668,7 +586,7 @@ const ( ELF_NOTE_NETBSD_NAMESZ = 7 ELF_NOTE_NETBSD_DESCSZ = 4 ELF_NOTE_NETBSD_TAG = 1 - ELF_NOTE_NETBSD_VERSION = 700000000 /* NetBSD 7.0 */ + ELF_NOTE_NETBSD_VERSION = 700000000 // NetBSD 7.0 ) var ELF_NOTE_NETBSD_NAME = []byte("NetBSD\x00") @@ -1250,11 +1168,12 @@ func elfshbits(linkmode LinkMode, sect *sym.Section) *ElfShdr { func elfshreloc(arch *sys.Arch, sect *sym.Section) *ElfShdr { // If main section is SHT_NOBITS, nothing to relocate. - // Also nothing to relocate in .shstrtab or notes. + // Also nothing to relocate in .shstrtab or notes or .gopclntab. if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen { return nil } - if sect.Name == ".shstrtab" || sect.Name == ".tbss" { + switch sect.Name { + case ".shstrtab", ".tbss", ".gopclntab": return nil } if sect.Elfsect.(*ElfShdr).Type == uint32(elf.SHT_NOTE) { @@ -1379,7 +1298,7 @@ func elfEmitReloc(ctxt *Link) { for i := 0; i < len(Segdwarf.Sections); i++ { sect := Segdwarf.Sections[i] si := dwarfp[i] - if si.secSym() != loader.Sym(sect.Sym) || + if si.secSym() != sect.Sym || ctxt.loader.SymSect(si.secSym()) != sect { panic("inconsistency between dwarfp and Segdwarf") } @@ -1415,7 +1334,7 @@ func addgonote(ctxt *Link, sectionName string, tag uint32, desc []byte) { func (ctxt *Link) doelf() { ldr := ctxt.loader - /* predefine strings we need for section headers */ + // Predefine strings we need for section headers. addshstr := func(s string) int { off := len(elfshstrdat) @@ -1426,7 +1345,7 @@ func (ctxt *Link) doelf() { shstrtabAddstring := func(s string) { off := addshstr(s) - elfsetstring(ctxt, 0, s, int(off)) + elfsetstring(ctxt, 0, s, off) } shstrtabAddstring("") @@ -1469,6 +1388,7 @@ func (ctxt *Link) doelf() { } shstrtabAddstring(".elfdata") shstrtabAddstring(".rodata") + shstrtabAddstring(".gopclntab") // See the comment about data.rel.ro.FOO section names in data.go. relro_prefix := "" if ctxt.UseRelro() { @@ -1477,8 +1397,6 @@ func (ctxt *Link) doelf() { } shstrtabAddstring(relro_prefix + ".typelink") shstrtabAddstring(relro_prefix + ".itablink") - shstrtabAddstring(relro_prefix + ".gosymtab") - shstrtabAddstring(relro_prefix + ".gopclntab") if ctxt.IsExternal() { *FlagD = true @@ -1487,8 +1405,6 @@ func (ctxt *Link) doelf() { shstrtabAddstring(elfRelType + ".rodata") shstrtabAddstring(elfRelType + relro_prefix + ".typelink") shstrtabAddstring(elfRelType + relro_prefix + ".itablink") - shstrtabAddstring(elfRelType + relro_prefix + ".gosymtab") - shstrtabAddstring(elfRelType + relro_prefix + ".gopclntab") shstrtabAddstring(elfRelType + ".noptrdata") shstrtabAddstring(elfRelType + ".data") if ctxt.UseRelro() { @@ -1513,7 +1429,7 @@ func (ctxt *Link) doelf() { hasinitarr := ctxt.linkShared - /* shared library initializer */ + // Shared library initializer. 
switch ctxt.BuildMode { case BuildModeCArchive, BuildModeCShared, BuildModeShared, BuildModePlugin: hasinitarr = true @@ -1534,7 +1450,7 @@ func (ctxt *Link) doelf() { shstrtabAddstring(".shstrtab") - if !*FlagD { /* -d suppresses dynamic loader format */ + if !*FlagD { // -d suppresses dynamic loader format shstrtabAddstring(".interp") shstrtabAddstring(".hash") shstrtabAddstring(".got") @@ -1552,7 +1468,7 @@ func (ctxt *Link) doelf() { shstrtabAddstring(".gnu.version") shstrtabAddstring(".gnu.version_r") - /* dynamic symbol table - first entry all zeros */ + // dynamic symbol table - first entry all zeros dynsym := ldr.CreateSymForUpdate(".dynsym", 0) dynsym.SetType(sym.SELFROSECT) @@ -1562,7 +1478,7 @@ func (ctxt *Link) doelf() { dynsym.SetSize(dynsym.Size() + ELF32SYMSIZE) } - /* dynamic string table */ + // dynamic string table dynstr := ldr.CreateSymForUpdate(".dynstr", 0) dynstr.SetType(sym.SELFROSECT) @@ -1570,11 +1486,11 @@ func (ctxt *Link) doelf() { dynstr.Addstring("") } - /* relocation table */ + // relocation table s := ldr.CreateSymForUpdate(elfRelType, 0) s.SetType(sym.SELFROSECT) - /* global offset table */ + // global offset table got := ldr.CreateSymForUpdate(".got", 0) if ctxt.UseRelro() { got.SetType(sym.SELFRELROSECT) @@ -1582,13 +1498,13 @@ func (ctxt *Link) doelf() { got.SetType(sym.SELFGOT) // writable } - /* ppc64 glink resolver */ + // ppc64 glink resolver if ctxt.IsPPC64() { s := ldr.CreateSymForUpdate(".glink", 0) s.SetType(sym.SELFRXSECT) } - /* hash */ + // hash hash := ldr.CreateSymForUpdate(".hash", 0) hash.SetType(sym.SELFROSECT) @@ -1617,7 +1533,7 @@ func (ctxt *Link) doelf() { s = ldr.CreateSymForUpdate(".gnu.version_r", 0) s.SetType(sym.SELFROSECT) - /* define dynamic elf table */ + // define dynamic elf table dynamic := ldr.CreateSymForUpdate(".dynamic", 0) switch { case thearch.ELF.DynamicReadOnly: @@ -1634,9 +1550,7 @@ func (ctxt *Link) doelf() { } thearch.ELF.SetupPLT(ctxt, ctxt.loader, plt, gotplt, dynamic.Sym()) - /* - * .dynamic table - */ + // .dynamic table elfWriteDynEntSym(ctxt, dynamic, elf.DT_HASH, hash.Sym()) elfWriteDynEntSym(ctxt, dynamic, elf.DT_SYMTAB, dynsym.Sym()) @@ -1741,7 +1655,7 @@ func (ctxt *Link) doelf() { } } -// Do not write DT_NULL. elfdynhash will finish it. +// shsym fills in fields of sh where s contains the contents of the section. func shsym(sh *ElfShdr, ldr *loader.Loader, s loader.Sym) { if s == 0 { panic("bad symbol in shsym2") @@ -1764,7 +1678,7 @@ func phsh(ph *ElfPhdr, sh *ElfShdr) { } func Asmbelfsetup() { - /* This null SHdr must appear before all others */ + // This null SHdr must appear before all others. elfshname("") for _, sect := range Segtext.Sections { @@ -1872,7 +1786,7 @@ func asmbElf(ctxt *Link) { phsh(getpnote(), sh) } if ctxt.LinkMode == LinkExternal { - /* skip program headers */ + // skip program headers eh.Phoff = 0 eh.Phentsize = 0 @@ -1896,7 +1810,7 @@ func asmbElf(ctxt *Link) { goto elfobj } - /* program header info */ + // program header info pph = newElfPhdr() pph.Type = elf.PT_PHDR @@ -1906,10 +1820,8 @@ func asmbElf(ctxt *Link) { pph.Paddr = uint64(*FlagTextAddr) - uint64(HEADR) + pph.Off pph.Align = uint64(*FlagRound) - /* - * PHDR must be in a loaded segment. Adjust the text - * segment boundaries downwards to include it. - */ + // PHDR must be in a loaded segment. Adjust the text + // segment boundaries downwards to include it. 
{ o := int64(Segtext.Vaddr - pph.Vaddr) Segtext.Vaddr -= uint64(o) @@ -1919,8 +1831,8 @@ func asmbElf(ctxt *Link) { Segtext.Filelen += uint64(o) } - if !*FlagD { /* -d suppresses dynamic loader format */ - /* interpreter */ + if !*FlagD { // -d suppresses dynamic loader format + // interpreter sh := elfshname(".interp") sh.Type = uint32(elf.SHT_PROGBITS) @@ -2024,7 +1936,7 @@ func asmbElf(ctxt *Link) { } elfphload(&Segdata) - /* Dynamic linking sections */ + // Dynamic linking sections if !*FlagD { sh := elfshname(".dynsym") sh.Type = uint32(elf.SHT_DYNSYM) @@ -2162,7 +2074,7 @@ func asmbElf(ctxt *Link) { sh.Link = uint32(elfshname(".dynsym").shnum) shsym(sh, ldr, ldr.Lookup(".hash", 0)) - /* sh and elf.PT_DYNAMIC for .dynamic section */ + // sh and elf.PT_DYNAMIC for .dynamic section sh = elfshname(".dynamic") sh.Type = uint32(elf.SHT_DYNAMIC) @@ -2176,9 +2088,7 @@ func asmbElf(ctxt *Link) { ph.Flags = elf.PF_R + elf.PF_W phsh(ph, sh) - /* - * Thread-local storage segment (really just size). - */ + // Thread-local storage segment (really just size). tlssize := uint64(0) for _, sect := range Segdata.Sections { if sect.Name == ".tbss" { @@ -2307,7 +2217,7 @@ elfobj: sh.Size = uint64(len(elfshstrdat)) sh.Addralign = 1 - /* Main header */ + // Main header copy(eh.Ident[:], elf.ELFMAG) var osabi elf.OSABI @@ -2412,8 +2322,7 @@ func elfadddynsym(ldr *loader.Loader, target *Target, syms *ArchSyms, s loader.S d.AddUint32(target.Arch, uint32(dstru.Addstring(name))) if elf64 { - - /* type */ + // type var t uint8 if cgoexp && st.IsText() { @@ -2423,24 +2332,24 @@ func elfadddynsym(ldr *loader.Loader, target *Target, syms *ArchSyms, s loader.S } d.AddUint8(t) - /* reserved */ + // reserved d.AddUint8(0) - /* section where symbol is defined */ + // section where symbol is defined if st == sym.SDYNIMPORT { d.AddUint16(target.Arch, uint16(elf.SHN_UNDEF)) } else { d.AddUint16(target.Arch, 1) } - /* value */ + // value if st == sym.SDYNIMPORT { d.AddUint64(target.Arch, 0) } else { d.AddAddrPlus(target.Arch, s, 0) } - /* size of object */ + // size of object d.AddUint64(target.Arch, uint64(len(ldr.Data(s)))) dil := ldr.SymDynimplib(s) @@ -2452,17 +2361,17 @@ func elfadddynsym(ldr *loader.Loader, target *Target, syms *ArchSyms, s loader.S } } else { - /* value */ + // value if st == sym.SDYNIMPORT { d.AddUint32(target.Arch, 0) } else { d.AddAddrPlus(target.Arch, s, 0) } - /* size of object */ + // size of object d.AddUint32(target.Arch, uint32(len(ldr.Data(s)))) - /* type */ + // type var t uint8 // TODO(mwhudson): presumably the behavior should actually be the same on both arm and 386. @@ -2476,7 +2385,7 @@ func elfadddynsym(ldr *loader.Loader, target *Target, syms *ArchSyms, s loader.S d.AddUint8(t) d.AddUint8(0) - /* shndx */ + // shndx if st == sym.SDYNIMPORT { d.AddUint16(target.Arch, uint16(elf.SHN_UNDEF)) } else { diff --git a/src/cmd/link/internal/ld/elf_test.go b/src/cmd/link/internal/ld/elf_test.go index c2a1bc0b944..c56d27f29e4 100644 --- a/src/cmd/link/internal/ld/elf_test.go +++ b/src/cmd/link/internal/ld/elf_test.go @@ -408,7 +408,7 @@ func TestElfBindNow(t *testing.T) { } // This program is intended to be just big/complicated enough that -// we wind up with decent-sized .data.rel.ro.{typelink,itablink,gopclntab} +// we wind up with decent-sized .data.rel.ro.{typelink,itablink} // sections. 
const ifacecallsProg = ` package main diff --git a/src/cmd/link/internal/ld/fallocate_test.go b/src/cmd/link/internal/ld/fallocate_test.go index 3c6b7ef752e..f463b5b63b3 100644 --- a/src/cmd/link/internal/ld/fallocate_test.go +++ b/src/cmd/link/internal/ld/fallocate_test.go @@ -10,6 +10,7 @@ import ( "errors" "os" "path/filepath" + "runtime" "syscall" "testing" ) @@ -53,12 +54,24 @@ func TestFallocate(t *testing.T) { if got := stat.Size(); got != sz { t.Errorf("unexpected file size: got %d, want %d", got, sz) } - // The number of blocks must be enough for the requested size. - // We used to require an exact match, but it appears that - // some file systems allocate a few extra blocks in some cases. - // See issue #41127. - if got, want := stat.Sys().(*syscall.Stat_t).Blocks, (sz+511)/512; got < want { - t.Errorf("unexpected disk usage: got %d blocks, want at least %d", got, want) + if runtime.GOOS == "darwin" { + // Check the number of allocated blocks on Darwin. On Linux (and + // perhaps BSDs), stat's Blocks field may not be portable as it + // is an implementation detail of the file system. On Darwin, it + // is documented as "the actual number of blocks allocated for + // the file in 512-byte units". + // The check was introduced when fixing a Darwin-specific bug. On + // Darwin, the file allocation syscall is a bit tricky. On Linux + // and BSDs, it is more straightforward and unlikely to go wrong. + // Given these two reasons, only check it on Darwin. + // + // The number of blocks must be enough for the requested size. + // We used to require an exact match, but it appears that + // some file systems allocate a few extra blocks in some cases. + // See issue #41127. + if got, want := stat.Sys().(*syscall.Stat_t).Blocks, (sz+511)/512; got < want { + t.Errorf("unexpected disk usage: got %d blocks, want at least %d", got, want) + } } out.munmap() } diff --git a/src/cmd/link/internal/ld/link.go b/src/cmd/link/internal/ld/link.go index df1fc7feaba..2276d39d888 100644 --- a/src/cmd/link/internal/ld/link.go +++ b/src/cmd/link/internal/ld/link.go @@ -126,7 +126,7 @@ type cgodata struct { directives [][]string } -func (ctxt *Link) Logf(format string, args ...interface{}) { +func (ctxt *Link) Logf(format string, args ...any) { fmt.Fprintf(ctxt.Bso, format, args...) ctxt.Bso.Flush() } diff --git a/src/cmd/link/internal/ld/macho.go b/src/cmd/link/internal/ld/macho.go index c2626346661..65ae1268c31 100644 --- a/src/cmd/link/internal/ld/macho.go +++ b/src/cmd/link/internal/ld/macho.go @@ -180,6 +180,8 @@ const ( BIND_SPECIAL_DYLIB_FLAT_LOOKUP = -2 BIND_SPECIAL_DYLIB_WEAK_LOOKUP = -3 + BIND_SYMBOL_FLAGS_WEAK_IMPORT = 0x1 + BIND_OPCODE_MASK = 0xF0 BIND_IMMEDIATE_MASK = 0x0F BIND_OPCODE_DONE = 0x00 @@ -919,7 +921,7 @@ func collectmachosyms(ctxt *Link) { continue } t := ldr.SymType(s) - if t >= sym.SELFRXSECT && t < sym.SXREF { // data sections handled in dodata + if t >= sym.SELFRXSECT && t < sym.SFirstUnallocated { // data sections handled in dodata if t == sym.STLSBSS { // TLSBSS is not used on darwin.
See data.go:allocateDataSections continue } @@ -1254,7 +1256,7 @@ func machoEmitReloc(ctxt *Link) { for i := 0; i < len(Segdwarf.Sections); i++ { sect := Segdwarf.Sections[i] si := dwarfp[i] - if si.secSym() != loader.Sym(sect.Sym) || + if si.secSym() != sect.Sym || ctxt.loader.SymSect(si.secSym()) != sect { panic("inconsistency between dwarfp and Segdwarf") } @@ -1429,7 +1431,11 @@ func machoDyldInfo(ctxt *Link) { bind.AddUint8(BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | uint8(d)&0xf) } - bind.AddUint8(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM) + flags := uint8(0) + if ldr.SymWeakBinding(r.targ) { + flags |= BIND_SYMBOL_FLAGS_WEAK_IMPORT + } + bind.AddUint8(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | flags) // target symbol name as a C string, with _ prefix bind.AddUint8('_') bind.Addstring(ldr.SymExtname(r.targ)) @@ -1535,11 +1541,11 @@ func machoCodeSign(ctxt *Link, fname string) error { // Update the __LINKEDIT segment. segSz := sigOff + sz - int64(linkeditSeg.Offset) mf.ByteOrder.PutUint64(tmp[:8], uint64(segSz)) - _, err = f.WriteAt(tmp[:8], int64(linkeditOff)+int64(unsafe.Offsetof(macho.Segment64{}.Memsz))) + _, err = f.WriteAt(tmp[:8], linkeditOff+int64(unsafe.Offsetof(macho.Segment64{}.Memsz))) if err != nil { return err } - _, err = f.WriteAt(tmp[:8], int64(linkeditOff)+int64(unsafe.Offsetof(macho.Segment64{}.Filesz))) + _, err = f.WriteAt(tmp[:8], linkeditOff+int64(unsafe.Offsetof(macho.Segment64{}.Filesz))) if err != nil { return err } diff --git a/src/cmd/link/internal/ld/macho_combine_dwarf.go b/src/cmd/link/internal/ld/macho_combine_dwarf.go index d60755f147d..2b303cbdae1 100644 --- a/src/cmd/link/internal/ld/macho_combine_dwarf.go +++ b/src/cmd/link/internal/ld/macho_combine_dwarf.go @@ -392,7 +392,7 @@ func machoUpdateDwarfHeader(r *imacho.LoadCmdUpdater, compressedSects []*macho.S return machoUpdateSections(*r, &seg, uint64(dwarfstart)-realdwarf.Offset, compressedSects) } -func machoUpdateLoadCommand(r imacho.LoadCmdUpdater, linkseg *macho.Segment, linkoffset uint64, cmd interface{}, fields ...string) error { +func machoUpdateLoadCommand(r imacho.LoadCmdUpdater, linkseg *macho.Segment, linkoffset uint64, cmd any, fields ...string) error { if err := r.ReadAt(0, cmd); err != nil { return err } diff --git a/src/cmd/link/internal/ld/macho_test.go b/src/cmd/link/internal/ld/macho_test.go index 29adc0b78b1..905de67f601 100644 --- a/src/cmd/link/internal/ld/macho_test.go +++ b/src/cmd/link/internal/ld/macho_test.go @@ -37,14 +37,14 @@ func TestMachoSectionsReadOnly(t *testing.T) { args: []string{"-ldflags", "-linkmode=internal"}, prog: prog, mustInternalLink: true, - wantSecsRO: []string{"__got", "__rodata", "__itablink", "__typelink", "__gosymtab", "__gopclntab"}, + wantSecsRO: []string{"__got", "__rodata", "__itablink", "__typelink"}, }, { name: "linkmode-external", args: []string{"-ldflags", "-linkmode=external"}, prog: prog, mustHaveCGO: true, - wantSecsRO: []string{"__got", "__rodata", "__itablink", "__typelink", "__gopclntab"}, + wantSecsRO: []string{"__got", "__rodata", "__itablink", "__typelink"}, }, { name: "cgo-linkmode-internal", @@ -52,14 +52,14 @@ func TestMachoSectionsReadOnly(t *testing.T) { args: []string{"-ldflags", "-linkmode=internal"}, prog: progC, mustHaveCGO: true, mustInternalLink: true, - wantSecsRO: []string{"__got", "__rodata", "__itablink", "__typelink", "__gopclntab"}, + wantSecsRO: []string{"__got", "__rodata", "__itablink", "__typelink"}, }, { name: "cgo-linkmode-external", args: []string{"-ldflags", "-linkmode=external"}, prog: progC, mustHaveCGO: true, - wantSecsRO: []string{"__got", "__rodata",
"__itablink", "__typelink", "__gopclntab"}, + wantSecsRO: []string{"__got", "__rodata", "__itablink", "__typelink"}, }, } @@ -111,7 +111,8 @@ func TestMachoSectionsReadOnly(t *testing.T) { for _, wsroname := range test.wantSecsRO { // Now walk the sections. Section should be part of - // some segment that is readonly. + // some segment that is made readonly after + // relocations are appied. var wsro *macho.Section foundRO := false for _, s := range machoFile.Sections { diff --git a/src/cmd/link/internal/ld/pcln.go b/src/cmd/link/internal/ld/pcln.go index 9532b33a9bc..68af94a405a 100644 --- a/src/cmd/link/internal/ld/pcln.go +++ b/src/cmd/link/internal/ld/pcln.go @@ -216,7 +216,7 @@ func genInlTreeSym(ctxt *Link, cu *sym.CompilationUnit, fi loader.FuncInfo, arch const size = 16 inlTreeSym.SetUint8(arch, int64(i*size+0), uint8(funcID)) // Bytes 1-3 are unused. - inlTreeSym.SetUint32(arch, int64(i*size+4), uint32(nameOff)) + inlTreeSym.SetUint32(arch, int64(i*size+4), nameOff) inlTreeSym.SetUint32(arch, int64(i*size+8), uint32(call.ParentPC)) inlTreeSym.SetUint32(arch, int64(i*size+12), uint32(startLine)) } @@ -243,7 +243,6 @@ func makeInlSyms(ctxt *Link, funcs []loader.Sym, nameOffsets map[loader.Sym]uint // generator to fill in its data later. func (state *pclntab) generatePCHeader(ctxt *Link) { ldr := ctxt.loader - textStartOff := int64(8 + 2*ctxt.Arch.PtrSize) size := int64(8 + 8*ctxt.Arch.PtrSize) writeHeader := func(ctxt *Link, s loader.Sym) { header := ctxt.loader.MakeSymbolUpdater(s) @@ -264,10 +263,7 @@ func (state *pclntab) generatePCHeader(ctxt *Link) { header.SetUint8(ctxt.Arch, 7, uint8(ctxt.Arch.PtrSize)) off := header.SetUint(ctxt.Arch, 8, uint64(state.nfunc)) off = header.SetUint(ctxt.Arch, off, uint64(state.nfiles)) - if off != textStartOff { - panic(fmt.Sprintf("pcHeader textStartOff: %d != %d", off, textStartOff)) - } - off += int64(ctxt.Arch.PtrSize) // skip runtimeText relocation + off = header.SetUintptr(ctxt.Arch, off, 0) // unused off = writeSymOffset(off, state.funcnametab) off = writeSymOffset(off, state.cutab) off = writeSymOffset(off, state.filetab) @@ -279,9 +275,6 @@ func (state *pclntab) generatePCHeader(ctxt *Link) { } state.pcheader = state.addGeneratedSym(ctxt, "runtime.pcheader", size, writeHeader) - // Create the runtimeText relocation. - sb := ldr.MakeSymbolUpdater(state.pcheader) - sb.SetAddr(ctxt.Arch, textStartOff, ldr.Lookup("runtime.text", 0)) } // walkFuncs iterates over the funcs, calling a function for each unique @@ -683,14 +676,14 @@ func writeFuncs(ctxt *Link, sb *loader.SymbolBuilder, funcs []loader.Sym, inlSym off := int64(startLocations[i]) // entryOff uint32 (offset of func entry PC from textStart) entryOff := textOff(ctxt, s, textStart) - off = sb.SetUint32(ctxt.Arch, off, uint32(entryOff)) + off = sb.SetUint32(ctxt.Arch, off, entryOff) // nameOff int32 nameOff, ok := nameOffsets[s] if !ok { panic("couldn't find function name offset") } - off = sb.SetUint32(ctxt.Arch, off, uint32(nameOff)) + off = sb.SetUint32(ctxt.Arch, off, nameOff) // args int32 // TODO: Move into funcinfo. @@ -712,7 +705,7 @@ func writeFuncs(ctxt *Link, sb *loader.SymbolBuilder, funcs []loader.Sym, inlSym } else { off += 12 } - off = sb.SetUint32(ctxt.Arch, off, uint32(numPCData(ldr, s, fi))) + off = sb.SetUint32(ctxt.Arch, off, numPCData(ldr, s, fi)) // Store the offset to compilation unit's file table. 
cuIdx := ^uint32(0) diff --git a/src/cmd/link/internal/ld/pe.go b/src/cmd/link/internal/ld/pe.go index 0f0650e5e14..b49da42c4cf 100644 --- a/src/cmd/link/internal/ld/pe.go +++ b/src/cmd/link/internal/ld/pe.go @@ -653,7 +653,7 @@ dwarfLoop: for i := 0; i < len(Segdwarf.Sections); i++ { sect := Segdwarf.Sections[i] si := dwarfp[i] - if si.secSym() != loader.Sym(sect.Sym) || + if si.secSym() != sect.Sym || ldr.SymSect(si.secSym()) != sect { panic("inconsistency between dwarfp and Segdwarf") } @@ -854,7 +854,7 @@ func (f *peFile) writeSymbols(ctxt *Link) { continue } t := ldr.SymType(s) - if t >= sym.SELFRXSECT && t < sym.SXREF { // data sections handled in dodata + if t >= sym.SELFRXSECT && t < sym.SFirstUnallocated { // data sections handled in dodata if t == sym.STLSBSS { continue } @@ -1506,7 +1506,7 @@ func (rt *peBaseRelocTable) addentry(ldr *loader.Loader, s loader.Sym, r *loader const pageSize = 0x1000 const pageMask = pageSize - 1 - addr := ldr.SymValue(s) + int64(r.Off()) - int64(PEBASE) + addr := ldr.SymValue(s) + int64(r.Off()) - PEBASE page := uint32(addr &^ pageMask) off := uint32(addr & pageMask) @@ -1758,7 +1758,9 @@ func peCreateExportFile(ctxt *Link, libName string) (fname string) { fname = filepath.Join(*flagTmpdir, "export_file.def") var buf bytes.Buffer - fmt.Fprintf(&buf, "LIBRARY %s\n", libName) + if ctxt.BuildMode == BuildModeCShared { + fmt.Fprintf(&buf, "LIBRARY %s\n", libName) + } buf.WriteString("EXPORTS\n") ldr := ctxt.loader diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go index 2c999ccc4e3..a0345ca1c7b 100644 --- a/src/cmd/link/internal/ld/symtab.go +++ b/src/cmd/link/internal/ld/symtab.go @@ -244,7 +244,7 @@ func genelfsym(ctxt *Link, elfbind elf.SymBind) { continue } st := ldr.SymType(s) - if st >= sym.SELFRXSECT && st < sym.SXREF { + if st >= sym.SELFRXSECT && st < sym.SFirstUnallocated { typ := elf.STT_OBJECT if st == sym.STLSBSS { if ctxt.IsInternal() { @@ -345,7 +345,7 @@ func asmbPlan9Sym(ctxt *Link) { continue } t := ldr.SymType(s) - if t >= sym.SELFRXSECT && t < sym.SXREF { // data sections handled in dodata + if t >= sym.SELFRXSECT && t < sym.SFirstUnallocated { // data sections handled in dodata if t == sym.STLSBSS { continue } @@ -446,7 +446,6 @@ func (ctxt *Link) symtab(pcln *pclntab) []sym.SymKind { ctxt.xdefine("runtime.ecovctrs", sym.SNOPTRBSS, 0) ctxt.xdefine("runtime.end", sym.SBSS, 0) ctxt.xdefine("runtime.epclntab", sym.SRODATA, 0) - ctxt.xdefine("runtime.esymtab", sym.SRODATA, 0) // garbage collection symbols s := ldr.CreateSymForUpdate("runtime.gcdata", 0) @@ -506,11 +505,6 @@ func (ctxt *Link) symtab(pcln *pclntab) []sym.SymKind { symgofuncrel = groupSym("go:funcrel.*", sym.SGOFUNCRELRO) } - symt := ldr.CreateSymForUpdate("runtime.symtab", 0) - symt.SetType(sym.SSYMTAB) - symt.SetSize(0) - symt.SetLocal(true) - // assign specific types so that they sort together. // within a type they sort by size, so the .* symbols // just defined above will be first. @@ -843,7 +837,7 @@ func (ctxt *Link) symtab(pcln *pclntab) []sym.SymKind { } // CarrierSymByType tracks carrier symbols and their sizes. -var CarrierSymByType [sym.SXREF]struct { +var CarrierSymByType [sym.SFirstUnallocated]struct { Sym loader.Sym Size int64 } diff --git a/src/cmd/link/internal/ld/util.go b/src/cmd/link/internal/ld/util.go index 556c77d7326..f3787ff50b4 100644 --- a/src/cmd/link/internal/ld/util.go +++ b/src/cmd/link/internal/ld/util.go @@ -32,7 +32,7 @@ func Exit(code int) { } // Exitf logs an error message then calls Exit(2). 
-func Exitf(format string, a ...interface{}) { +func Exitf(format string, a ...any) { fmt.Fprintf(os.Stderr, os.Args[0]+": "+format+"\n", a...) nerrors++ if *flagH { @@ -60,7 +60,7 @@ func afterErrorAction() { // // Logging an error means that on exit cmd/link will delete any // output file and return a non-zero error code. -func Errorf(format string, args ...interface{}) { +func Errorf(format string, args ...any) { format += "\n" fmt.Fprintf(os.Stderr, format, args...) afterErrorAction() @@ -72,7 +72,7 @@ func Errorf(format string, args ...interface{}) { // // Logging an error means that on exit cmd/link will delete any // output file and return a non-zero error code. -func (ctxt *Link) Errorf(s loader.Sym, format string, args ...interface{}) { +func (ctxt *Link) Errorf(s loader.Sym, format string, args ...any) { if ctxt.loader != nil { ctxt.loader.Errorf(s, format, args...) return diff --git a/src/cmd/link/internal/ld/xcoff.go b/src/cmd/link/internal/ld/xcoff.go index da728e25455..4500a7cb0c9 100644 --- a/src/cmd/link/internal/ld/xcoff.go +++ b/src/cmd/link/internal/ld/xcoff.go @@ -167,8 +167,7 @@ const ( ) // Type representing all XCOFF symbols. -type xcoffSym interface { -} +type xcoffSym any // Symbol Table Entry type XcoffSymEnt64 struct { @@ -677,7 +676,7 @@ func (f *xcoffFile) writeSymbolNewFile(ctxt *Link, name string, firstEntry uint6 dwsize = getDwsectCUSize(sect.Name, name) // .debug_abbrev is common to all packages and not found with the previous function if sect.Name == ".debug_abbrev" { - dwsize = uint64(ldr.SymSize(loader.Sym(sect.Sym))) + dwsize = uint64(ldr.SymSize(sect.Sym)) } } else { @@ -699,7 +698,7 @@ func (f *xcoffFile) writeSymbolNewFile(ctxt *Link, name string, firstEntry uint6 // Dwarf relocations need the symbol number of .dw* symbols. // It doesn't need to know it for each package, one is enough. // currSymSrcFile.csectAux == nil means first package. - ldr.SetSymDynid(loader.Sym(sect.Sym), int32(f.symbolCount)) + ldr.SetSymDynid(sect.Sym, int32(f.symbolCount)) if sect.Name == ".debug_frame" && ctxt.LinkMode != LinkExternal { // CIE size must be added to the first package. 
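A note on the many ...interface{} -> ...any rewrites in this patch (Logf, Exitf, Errorf, xcoffSym, and so on): any has been a predeclared alias for interface{} since Go 1.18, so these changes are purely cosmetic. A minimal demonstration:

package main

import "fmt"

type xcoffSymOld = interface{} // old spelling
type xcoffSymNew = any         // same type: any is an alias for interface{}

// logf behaves exactly as it would with ...interface{}.
func logf(format string, args ...any) {
	fmt.Printf(format+"\n", args...)
}

func main() {
	var a xcoffSymOld = 1
	var b xcoffSymNew = a // freely assignable; the types are identical
	logf("a=%v b=%v", a, b)
}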
@@ -865,7 +864,7 @@ func (f *xcoffFile) writeSymbolFunc(ctxt *Link, x loader.Sym) []xcoffSym { Xsmtyp: XTY_LD, // label definition (based on C) Xauxtype: _AUX_CSECT, } - a4.Xsmtyp |= uint8(xcoffAlign(ldr, x, TextSym) << 3) + a4.Xsmtyp |= xcoffAlign(ldr, x, TextSym) << 3 syms = append(syms, a4) return syms @@ -915,7 +914,7 @@ func putaixsym(ctxt *Link, x loader.Sym, t SymbolType) { Xsmclas: XMC_PR, Xsmtyp: XTY_SD, } - a4.Xsmtyp |= uint8(xcoffAlign(ldr, x, TextSym) << 3) + a4.Xsmtyp |= xcoffAlign(ldr, x, TextSym) << 3 syms = append(syms, a4) } @@ -976,7 +975,7 @@ func putaixsym(ctxt *Link, x loader.Sym, t SymbolType) { a4.Xsmtyp |= XTY_CM } - a4.Xsmtyp |= uint8(xcoffAlign(ldr, x, t) << 3) + a4.Xsmtyp |= xcoffAlign(ldr, x, t) << 3 syms = append(syms, a4) @@ -1121,7 +1120,7 @@ func (f *xcoffFile) asmaixsym(ctxt *Link) { putaixsym(ctxt, s, BSSSym) } - case st >= sym.SELFRXSECT && st < sym.SXREF: // data sections handled in dodata + case st >= sym.SELFRXSECT && st < sym.SFirstUnallocated: // data sections handled in dodata if ldr.AttrReachable(s) { putaixsym(ctxt, s, DataSym) } @@ -1375,7 +1374,7 @@ func (f *xcoffFile) writeLdrScn(ctxt *Link, globalOff uint64) { /* Symbol table */ for _, s := range f.loaderSymbols { lds := &XcoffLdSym64{ - Loffset: uint32(stlen + 2), + Loffset: stlen + 2, Lsmtype: s.smtype, Lsmclas: s.smclas, } @@ -1580,7 +1579,7 @@ func xcoffwrite(ctxt *Link) { func asmbXcoff(ctxt *Link) { ctxt.Out.SeekSet(0) fileoff := int64(Segdwarf.Fileoff + Segdwarf.Filelen) - fileoff = int64(Rnd(int64(fileoff), *FlagRound)) + fileoff = Rnd(fileoff, *FlagRound) xfile.sectNameToScnum = make(map[string]int16) @@ -1693,7 +1692,7 @@ func (f *xcoffFile) emitRelocations(ctxt *Link, fileoff int64) { if !ldr.AttrReachable(s) { continue } - if ldr.SymValue(s) >= int64(eaddr) { + if ldr.SymValue(s) >= eaddr { break } @@ -1755,7 +1754,7 @@ dwarfLoop: for i := 0; i < len(Segdwarf.Sections); i++ { sect := Segdwarf.Sections[i] si := dwarfp[i] - if si.secSym() != loader.Sym(sect.Sym) || + if si.secSym() != sect.Sym || ldr.SymSect(si.secSym()) != sect { panic("inconsistency between dwarfp and Segdwarf") } diff --git a/src/cmd/link/internal/loadelf/ldelf.go b/src/cmd/link/internal/loadelf/ldelf.go index bb0c22da7ef..1c83f03a5db 100644 --- a/src/cmd/link/internal/loadelf/ldelf.go +++ b/src/cmd/link/internal/loadelf/ldelf.go @@ -242,7 +242,7 @@ func parseArmAttributes(e binary.ByteOrder, data []byte) (found bool, ehdrFlags // object, and the returned ehdrFlags contains what this Load function computes. // TODO: find a better place for this logic. 
func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, f *bio.Reader, pkg string, length int64, pn string, initEhdrFlags uint32) (textp []loader.Sym, ehdrFlags uint32, err error) { - errorf := func(str string, args ...interface{}) ([]loader.Sym, uint32, error) { + errorf := func(str string, args ...any) ([]loader.Sym, uint32, error) { return nil, 0, fmt.Errorf("loadelf: %s: %v", pn, fmt.Sprintf(str, args...)) } @@ -785,12 +785,12 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, f *bio.Reader, rSym = 0 } else { var elfsym ElfSym - if err := readelfsym(l, arch, elfobj, int(symIdx), &elfsym, 0, 0); err != nil { + if err := readelfsym(l, arch, elfobj, symIdx, &elfsym, 0, 0); err != nil { return errorf("malformed elf file: %v", err) } elfsym.sym = symbols[symIdx] if elfsym.sym == 0 { - return errorf("malformed elf file: %s#%d: reloc of invalid sym #%d %s shndx=%d type=%d", l.SymName(sect.sym), j, int(symIdx), elfsym.name, elfsym.shndx, elfsym.type_) + return errorf("malformed elf file: %s#%d: reloc of invalid sym #%d %s shndx=%d type=%d", l.SymName(sect.sym), j, symIdx, elfsym.name, elfsym.shndx, elfsym.type_) } rSym = elfsym.sym @@ -854,7 +854,7 @@ func elfmap(elfobj *ElfObj, sect *ElfSect) (err error) { } elfobj.f.MustSeek(int64(uint64(elfobj.base)+sect.off), 0) - sect.base, sect.readOnlyMem, err = elfobj.f.Slice(uint64(sect.size)) + sect.base, sect.readOnlyMem, err = elfobj.f.Slice(sect.size) if err != nil { return fmt.Errorf("short read: %v", err) } diff --git a/src/cmd/link/internal/loader/loader.go b/src/cmd/link/internal/loader/loader.go index 0ed20d1becb..2d386c0c65e 100644 --- a/src/cmd/link/internal/loader/loader.go +++ b/src/cmd/link/internal/loader/loader.go @@ -241,6 +241,7 @@ type Loader struct { plt map[Sym]int32 // stores dynimport for pe objects got map[Sym]int32 // stores got for pe objects dynid map[Sym]int32 // stores Dynid for symbol + weakBinding map[Sym]bool // stores whether a symbol has a weak binding relocVariant map[relocId]sym.RelocVariant // stores variant relocs @@ -326,6 +327,7 @@ func NewLoader(flags uint32, reporter *ErrorReporter) *Loader { plt: make(map[Sym]int32), got: make(map[Sym]int32), dynid: make(map[Sym]int32), + weakBinding: make(map[Sym]bool), attrCgoExportDynamic: make(map[Sym]struct{}), attrCgoExportStatic: make(map[Sym]struct{}), deferReturnTramp: make(map[Sym]bool), @@ -806,7 +808,7 @@ func (l *Loader) SymVersion(i Sym) int { return pp.ver } r, li := l.toLocal(i) - return int(abiToVer(r.Sym(li).ABI(), r.version)) + return abiToVer(r.Sym(li).ABI(), r.version) } func (l *Loader) IsFileLocal(i Sym) bool { @@ -1351,9 +1353,6 @@ func (l *Loader) SetSymAlign(i Sym, align int32) { if int(i) >= len(l.align) { l.align = append(l.align, make([]uint8, l.NSym()-len(l.align))...) } - if align == 0 { - l.align[i] = 0 - } l.align[i] = uint8(bits.Len32(uint32(align))) } @@ -1450,6 +1449,18 @@ func (l *Loader) SetSymExtname(i Sym, value string) { } } +func (l *Loader) SymWeakBinding(i Sym) bool { + return l.weakBinding[i] +} + +func (l *Loader) SetSymWeakBinding(i Sym, v bool) { + // reject bad symbols + if i >= Sym(len(l.objSyms)) || i == 0 { + panic("bad symbol index in SetSymWeakBinding") + } + l.weakBinding[i] = v +} + // SymElfType returns the previously recorded ELF type for a symbol // (used only for symbols read from shared libraries by ldshlibsyms). 
// It is not set for symbols defined by the packages being linked or @@ -2748,15 +2759,15 @@ func (l *Loader) AssignTextSymbolOrder(libs []*sym.Library, intlibs []bool, exts // We still need to record its presence in the current // package, as the trampoline pass expects packages // are laid out in dependency order. - lib.DupTextSyms = append(lib.DupTextSyms, sym.LoaderSym(gi)) + lib.DupTextSyms = append(lib.DupTextSyms, gi) continue // symbol in different object } if dupok { - lib.DupTextSyms = append(lib.DupTextSyms, sym.LoaderSym(gi)) + lib.DupTextSyms = append(lib.DupTextSyms, gi) continue } - lib.Textp = append(lib.Textp, sym.LoaderSym(gi)) + lib.Textp = append(lib.Textp, gi) } } @@ -2769,7 +2780,7 @@ func (l *Loader) AssignTextSymbolOrder(libs []*sym.Library, intlibs []bool, exts lists := [2][]sym.LoaderSym{lib.Textp, lib.DupTextSyms} for i, list := range lists { for _, s := range list { - sym := Sym(s) + sym := s if !assignedToUnit.Has(sym) { textp = append(textp, sym) unit := l.SymUnit(sym) @@ -2810,7 +2821,7 @@ type ErrorReporter struct { // // Logging an error means that on exit cmd/link will delete any // output file and return a non-zero error code. -func (reporter *ErrorReporter) Errorf(s Sym, format string, args ...interface{}) { +func (reporter *ErrorReporter) Errorf(s Sym, format string, args ...any) { if s != 0 && reporter.ldr.SymName(s) != "" { // Note: Replace is needed here because symbol names might have % in them, // due to the use of LinkString for names of instantiating types. @@ -2829,7 +2840,7 @@ func (l *Loader) GetErrorReporter() *ErrorReporter { } // Errorf method logs an error message. See ErrorReporter.Errorf for details. -func (l *Loader) Errorf(s Sym, format string, args ...interface{}) { +func (l *Loader) Errorf(s Sym, format string, args ...any) { l.errorReporter.Errorf(s, format, args...) } diff --git a/src/cmd/link/internal/loader/symbolbuilder.go b/src/cmd/link/internal/loader/symbolbuilder.go index 35749f9ea93..d385dec94a1 100644 --- a/src/cmd/link/internal/loader/symbolbuilder.go +++ b/src/cmd/link/internal/loader/symbolbuilder.go @@ -139,7 +139,7 @@ func (sb *SymbolBuilder) SetRelocAdd(i int, a int64) { sb.relocs[i].SetAdd(a) } -// SetRelocAdd sets the size of the 'i'-th relocation on this sym to 'sz' +// SetRelocSiz sets the size of the 'i'-th relocation on this sym to 'sz' func (sb *SymbolBuilder) SetRelocSiz(i int, sz uint8) { sb.relocs[i].SetSiz(sz) } diff --git a/src/cmd/link/internal/loadmacho/ldmacho.go b/src/cmd/link/internal/loadmacho/ldmacho.go index 5e8022ce69b..ecd01509433 100644 --- a/src/cmd/link/internal/loadmacho/ldmacho.go +++ b/src/cmd/link/internal/loadmacho/ldmacho.go @@ -424,7 +424,7 @@ func macholoadsym(m *ldMachoObj, symtab *ldMachoSymtab) int { // Load the Mach-O file pn from f. // Symbols are written into syms, and a slice of the text symbols is returned. 
func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, f *bio.Reader, pkg string, length int64, pn string) (textp []loader.Sym, err error) { - errorf := func(str string, args ...interface{}) ([]loader.Sym, error) { + errorf := func(str string, args ...any) ([]loader.Sym, error) { return nil, fmt.Errorf("loadmacho: %v: %v", pn, fmt.Sprintf(str, args...)) } @@ -613,6 +613,9 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, f *bio.Reader, } if machsym.desc&(N_WEAK_REF|N_WEAK_DEF) != 0 { l.SetAttrDuplicateOK(s, true) + if machsym.desc&N_WEAK_REF != 0 { + l.SetSymWeakBinding(s, true) + } } machsym.sym = s if machsym.sectnum == 0 { // undefined diff --git a/src/cmd/link/internal/loadpe/ldpe.go b/src/cmd/link/internal/loadpe/ldpe.go index d3a050135c2..2073045c479 100644 --- a/src/cmd/link/internal/loadpe/ldpe.go +++ b/src/cmd/link/internal/loadpe/ldpe.go @@ -392,21 +392,59 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, input *bio.Read switch r.Type { case IMAGE_REL_ARM64_ADDR32: rType = objabi.R_ADDR + case IMAGE_REL_ARM64_ADDR64: + rType = objabi.R_ADDR + rSize = 8 case IMAGE_REL_ARM64_ADDR32NB: rType = objabi.R_PEIMAGEOFF + case IMAGE_REL_ARM64_BRANCH26: + rType = objabi.R_CALLARM64 + case IMAGE_REL_ARM64_PAGEBASE_REL21, + IMAGE_REL_ARM64_PAGEOFFSET_12A, + IMAGE_REL_ARM64_PAGEOFFSET_12L: + rType = objabi.R_ARM64_PCREL } } if rType == 0 { return nil, fmt.Errorf("%s: %v: unknown relocation type %v", pn, state.sectsyms[rsect], r.Type) } - var rAdd int64 + var val int64 switch rSize { default: panic("unexpected relocation size " + strconv.Itoa(int(rSize))) case 4: - rAdd = int64(int32(binary.LittleEndian.Uint32(state.sectdata[rsect][rOff:]))) + val = int64(int32(binary.LittleEndian.Uint32(state.sectdata[rsect][rOff:]))) case 8: - rAdd = int64(binary.LittleEndian.Uint64(state.sectdata[rsect][rOff:])) + val = int64(binary.LittleEndian.Uint64(state.sectdata[rsect][rOff:])) + } + var rAdd int64 + if arch.Family == sys.ARM64 { + switch r.Type { + case IMAGE_REL_ARM64_BRANCH26: + // This instruction doesn't support an addend. + case IMAGE_REL_ARM64_PAGEOFFSET_12A: + // The addend is stored in the immediate field of the instruction. + // Get the addend from the instruction. + rAdd = (val >> 10) & 0xfff + case IMAGE_REL_ARM64_PAGEOFFSET_12L: + // Same as IMAGE_REL_ARM64_PAGEOFFSET_12A, but taking into account the shift. + shift := uint32(val) >> 30 + if shift == 0 && (val>>20)&0x048 == 0x048 { // 128-bit vector load + shift = 4 + } + rAdd = ((val >> 10) & 0xfff) << shift + case IMAGE_REL_ARM64_PAGEBASE_REL21: + // The addend is stored in the immediate field of the instruction + // as a byte offset. Get the addend from the instruction and clear + // the immediate bits. + immlo := (val >> 29) & 3 + immhi := (val >> 5) & 0x7ffff + rAdd = (immhi << 2) | immlo + default: + rAdd = val + } + } else { + rAdd = val } // ld -r could generate multiple section symbols for the // same section but with different values, we have to take diff --git a/src/cmd/link/internal/loadxcoff/ldxcoff.go b/src/cmd/link/internal/loadxcoff/ldxcoff.go index fd116d54207..8c0949ec70a 100644 --- a/src/cmd/link/internal/loadxcoff/ldxcoff.go +++ b/src/cmd/link/internal/loadxcoff/ldxcoff.go @@ -42,7 +42,7 @@ func (f *xcoffBiobuf) ReadAt(p []byte, off int64) (int, error) { // loads the Xcoff file pn from f. // Symbols are written into loader, and a slice of the text symbols is returned. 
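The IMAGE_REL_ARM64_PAGEOFFSET_12L case in the loadpe hunk above has to undo the scaling ARM64 applies to the 12-bit load/store immediate: the instruction encodes the byte offset divided by the access size, so the loader shifts the immediate back up by the size field in bits 31:30, with a special case for 128-bit vector loads, whose size field reads as zero. A minimal standalone sketch of that extraction; the helper name and the test encoding are ours for illustration, not part of the patch:

package main

import "fmt"

// pageOff12LAddend recovers the byte addend from the scaled 12-bit
// immediate of an ARM64 load/store instruction word, mirroring the
// IMAGE_REL_ARM64_PAGEOFFSET_12L logic in the loadpe hunk above.
// (Illustrative helper, not from the patch.)
func pageOff12LAddend(val int64) int64 {
	shift := uint32(val) >> 30 // size field, bits 31:30
	if shift == 0 && (val>>20)&0x048 == 0x048 { // 128-bit vector load
		shift = 4
	}
	return ((val >> 10) & 0xfff) << shift
}

func main() {
	// LDR X1, [X0, #16] is a 64-bit load (size field 3), so the
	// byte offset 16 is encoded as imm12 = 16>>3 = 2.
	fmt.Println(pageOff12LAddend(0xF9400801)) // prints 16
}
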
func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, input *bio.Reader, pkg string, length int64, pn string) (textp []loader.Sym, err error) { - errorf := func(str string, args ...interface{}) ([]loader.Sym, error) { + errorf := func(str string, args ...any) ([]loader.Sym, error) { return nil, fmt.Errorf("loadxcoff: %v: %v", pn, fmt.Sprintf(str, args...)) } diff --git a/src/cmd/link/internal/loong64/asm.go b/src/cmd/link/internal/loong64/asm.go index 6adafd38fc5..e7f3fd1d981 100644 --- a/src/cmd/link/internal/loong64/asm.go +++ b/src/cmd/link/internal/loong64/asm.go @@ -273,7 +273,7 @@ func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loade } else { ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ)) } - rela.AddAddrPlus(target.Arch, targ, int64(r.Add())) + rela.AddAddrPlus(target.Arch, targ, r.Add()) return true } @@ -496,30 +496,30 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade pc := ldr.SymValue(s) + int64(r.Off()) t := calculatePCAlignedReloc(r.Type(), ldr.SymAddr(rs)+r.Add(), pc) if r.Type() == objabi.R_LOONG64_ADDR_LO { - return int64(val&0xffc003ff | (t << 10)), noExtReloc, isOk + return val&0xffc003ff | (t << 10), noExtReloc, isOk } - return int64(val&0xfe00001f | (t << 5)), noExtReloc, isOk + return val&0xfe00001f | (t << 5), noExtReloc, isOk case objabi.R_LOONG64_TLS_LE_HI, objabi.R_LOONG64_TLS_LE_LO: t := ldr.SymAddr(rs) + r.Add() if r.Type() == objabi.R_LOONG64_TLS_LE_LO { - return int64(val&0xffc003ff | ((t & 0xfff) << 10)), noExtReloc, isOk + return val&0xffc003ff | ((t & 0xfff) << 10), noExtReloc, isOk } - return int64(val&0xfe00001f | (((t) >> 12 << 5) & 0x1ffffe0)), noExtReloc, isOk + return val&0xfe00001f | (((t) >> 12 << 5) & 0x1ffffe0), noExtReloc, isOk case objabi.R_CALLLOONG64, objabi.R_JMPLOONG64: pc := ldr.SymValue(s) + int64(r.Off()) t := ldr.SymAddr(rs) + r.Add() - pc - return int64(val&0xfc000000 | (((t >> 2) & 0xffff) << 10) | (((t >> 2) & 0x3ff0000) >> 16)), noExtReloc, isOk + return val&0xfc000000 | (((t >> 2) & 0xffff) << 10) | (((t >> 2) & 0x3ff0000) >> 16), noExtReloc, isOk case objabi.R_JMP16LOONG64, objabi.R_JMP21LOONG64: pc := ldr.SymValue(s) + int64(r.Off()) t := ldr.SymAddr(rs) + r.Add() - pc if r.Type() == objabi.R_JMP16LOONG64 { - return int64(val&0xfc0003ff | (((t >> 2) & 0xffff) << 10)), noExtReloc, isOk + return val&0xfc0003ff | (((t >> 2) & 0xffff) << 10), noExtReloc, isOk } - return int64(val&0xfc0003e0 | (((t >> 2) & 0xffff) << 10) | (((t >> 2) & 0x1f0000) >> 16)), noExtReloc, isOk + return val&0xfc0003e0 | (((t >> 2) & 0xffff) << 10) | (((t >> 2) & 0x1f0000) >> 16), noExtReloc, isOk case objabi.R_LOONG64_TLS_IE_HI, objabi.R_LOONG64_TLS_IE_LO: @@ -540,9 +540,9 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade case objabi.R_LOONG64_ADD64, objabi.R_LOONG64_SUB64: if r.Type() == objabi.R_LOONG64_ADD64 { - return int64(val + ldr.SymAddr(rs) + r.Add()), noExtReloc, isOk + return val + ldr.SymAddr(rs) + r.Add(), noExtReloc, isOk } - return int64(val - (ldr.SymAddr(rs) + r.Add())), noExtReloc, isOk + return val - (ldr.SymAddr(rs) + r.Add()), noExtReloc, isOk } return val, 0, false @@ -628,7 +628,7 @@ func trampoline(ctxt *ld.Link, ldr *loader.Loader, ri int, rs, s loader.Sym) { for i := 0; ; i++ { oName := ldr.SymName(rs) name := oName + fmt.Sprintf("%+x-tramp%d", r.Add(), i) - tramp = ldr.LookupOrCreateSym(name, int(ldr.SymVersion(rs))) + tramp = ldr.LookupOrCreateSym(name, ldr.SymVersion(rs)) 
ldr.SetAttrReachable(tramp, true) if ldr.SymType(tramp) == sym.SDYNIMPORT { // don't reuse trampoline defined in other module diff --git a/src/cmd/link/internal/mips64/asm.go b/src/cmd/link/internal/mips64/asm.go index e82d9861841..a86ded65f28 100644 --- a/src/cmd/link/internal/mips64/asm.go +++ b/src/cmd/link/internal/mips64/asm.go @@ -298,9 +298,9 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade objabi.R_ADDRMIPSU: t := ldr.SymValue(rs) + r.Add() if r.Type() == objabi.R_ADDRMIPS { - return int64(val&0xffff0000 | t&0xffff), noExtReloc, isOk + return val&0xffff0000 | t&0xffff, noExtReloc, isOk } - return int64(val&0xffff0000 | ((t+1<<15)>>16)&0xffff), noExtReloc, isOk + return val&0xffff0000 | ((t+1<<15)>>16)&0xffff, noExtReloc, isOk case objabi.R_ADDRMIPSTLS: // thread pointer is at 0x7000 offset from the start of TLS data area t := ldr.SymValue(rs) + r.Add() - 0x7000 @@ -312,12 +312,12 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade if t < -32768 || t >= 32678 { ldr.Errorf(s, "TLS offset out of range %d", t) } - return int64(val&0xffff0000 | t&0xffff), noExtReloc, isOk + return val&0xffff0000 | t&0xffff, noExtReloc, isOk case objabi.R_CALLMIPS, objabi.R_JMPMIPS: // Low 26 bits = (S + A) >> 2 t := ldr.SymValue(rs) + r.Add() - return int64(val&0xfc000000 | (t>>2)&^0xfc000000), noExtReloc, isOk + return val&0xfc000000 | (t>>2)&^0xfc000000, noExtReloc, isOk } return val, 0, false diff --git a/src/cmd/link/internal/ppc64/asm.go b/src/cmd/link/internal/ppc64/asm.go index af7cddff7f0..5ceab3aa5a1 100644 --- a/src/cmd/link/internal/ppc64/asm.go +++ b/src/cmd/link/internal/ppc64/asm.go @@ -853,7 +853,7 @@ func addelfdynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s lo } else { ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ)) } - rela.AddAddrPlus(target.Arch, targ, int64(r.Add())) + rela.AddAddrPlus(target.Arch, targ, r.Add()) // Not mark r done here. So we still apply it statically, // so in the file content we'll also have the right offset @@ -1208,7 +1208,7 @@ func trampoline(ctxt *ld.Link, ldr *loader.Loader, ri int, rs, s loader.Sym) { // Look up the trampoline in case it already exists - tramp = ldr.LookupOrCreateSym(name, int(ldr.SymVersion(rs))) + tramp = ldr.LookupOrCreateSym(name, ldr.SymVersion(rs)) if oName == "runtime.deferreturn" { ldr.SetIsDeferReturnTramp(tramp, true) } diff --git a/src/cmd/link/internal/riscv64/asm.go b/src/cmd/link/internal/riscv64/asm.go index f8565837277..9d69152af0e 100644 --- a/src/cmd/link/internal/riscv64/asm.go +++ b/src/cmd/link/internal/riscv64/asm.go @@ -497,7 +497,7 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade } immMask := int64(riscv.JTypeImmMask) - val = (val &^ immMask) | int64(imm) + val = (val &^ immMask) | imm return val, 0, true @@ -719,7 +719,7 @@ func trampoline(ctxt *ld.Link, ldr *loader.Loader, ri int, rs, s loader.Sym) { if r.Add() != 0 { name = fmt.Sprintf("%s%+x-tramp%d", oName, r.Add(), i) } - tramp = ldr.LookupOrCreateSym(name, int(ldr.SymVersion(rs))) + tramp = ldr.LookupOrCreateSym(name, ldr.SymVersion(rs)) ldr.SetAttrReachable(tramp, true) if ldr.SymType(tramp) == sym.SDYNIMPORT { // Do not reuse trampoline defined in other module. 
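One step in the R_ADDRMIPSU case of the mips64 archreloc hunk above is worth spelling out: the paired R_ADDRMIPS low half is sign-extended when the processor rebuilds the address, so the high half is computed from t + 0x8000 rather than t, which rounds it up exactly when bit 15 of the low half is set. A small self-contained check; hi16 and lo16 are illustrative names of ours, not from the patch:

package main

import "fmt"

// hi16 computes the high half used by R_ADDRMIPSU: the upper 16 bits
// of t, pre-rounded so that adding the sign-extended low half
// reproduces t. (Illustrative helper, not from the patch.)
func hi16(t int64) int64 { return ((t + 1<<15) >> 16) & 0xffff }

// lo16 computes the low half used by R_ADDRMIPS.
func lo16(t int64) int64 { return t & 0xffff }

func main() {
	t := int64(0x12348888) // bit 15 of the low half is set
	hi, lo := hi16(t), lo16(t)
	// lo sign-extends to -0x7778, so hi must have rounded up to
	// 0x1235 for the pair to reconstruct the original address.
	fmt.Printf("%#x\n", hi<<16+int64(int16(lo))) // 0x12348888
}
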
@@ -744,7 +744,7 @@ func trampoline(ctxt *ld.Link, ldr *loader.Loader, ri int, rs, s loader.Sym) { if ldr.SymType(tramp) == 0 { trampb := ldr.MakeSymbolUpdater(tramp) ctxt.AddTramp(trampb, ldr.SymType(s)) - genCallTramp(ctxt.Arch, ctxt.LinkMode, ldr, trampb, rs, int64(r.Add())) + genCallTramp(ctxt.Arch, ctxt.LinkMode, ldr, trampb, rs, r.Add()) } sb := ldr.MakeSymbolUpdater(s) if ldr.SymValue(rs) == 0 { diff --git a/src/cmd/link/internal/sym/segment.go b/src/cmd/link/internal/sym/segment.go index c889e71ad64..d397b845707 100644 --- a/src/cmd/link/internal/sym/segment.go +++ b/src/cmd/link/internal/sym/segment.go @@ -52,7 +52,7 @@ type Section struct { Vaddr uint64 Length uint64 Seg *Segment - Elfsect interface{} // an *ld.ElfShdr + Elfsect any // an *ld.ElfShdr Reloff uint64 Rellen uint64 // Relcount is the number of *host* relocations applied to this section diff --git a/src/cmd/link/internal/sym/symkind.go b/src/cmd/link/internal/sym/symkind.go index 0671d9d724d..8709e7b48f8 100644 --- a/src/cmd/link/internal/sym/symkind.go +++ b/src/cmd/link/internal/sym/symkind.go @@ -41,31 +41,33 @@ type SymKind uint8 // //go:generate stringer -type=SymKind const ( + // An otherwise invalid zero value for the type. Sxxx SymKind = iota - STEXT - STEXTFIPSSTART - STEXTFIPS - STEXTFIPSEND - STEXTEND - SELFRXSECT - SMACHOPLT + // The text segment, containing executable instructions. + STEXT // General executable code. + STEXTFIPSSTART // Start of FIPS text section. + STEXTFIPS // Instructions hashed for FIPS checks. + STEXTFIPSEND // End of FIPS text section. + STEXTEND // End of text section. + SELFRXSECT // Executable PLT; PPC64 .glink. + SMACHOPLT // Mach-O PLT. - // Read-only sections. - STYPE - SSTRING - SGOSTRING - SGOFUNC - SGCBITS - SRODATA - SRODATAFIPSSTART - SRODATAFIPS - SRODATAFIPSEND - SRODATAEND - SFUNCTAB + // Read-only, non-executable segment. + STYPE // Type descriptors. + SSTRING // Used only for XCOFF runtime.rodata symbol? + SGOSTRING // Go string constants. + SGOFUNC // Function descriptors and funcdata symbols. + SGCBITS // GC bit masks and programs. + SRODATA // General read-only data. + SRODATAFIPSSTART // Start of FIPS read-only data. + SRODATAFIPS // FIPS read-only data. + SRODATAFIPSEND // End of FIPS read-only data. + SRODATAEND // End of read-only data. + SFUNCTAB // Appears to be unused, except for runtime.etypes. + SPCLNTAB // Pclntab data. + SELFROSECT // ELF read-only data: relocs, dynamic linking info. - SELFROSECT - - // Read-only sections with relocations. + // Read-only, non-executable, dynamically relocatable segment. // // Types STYPE-SFUNCTAB above are written to the .rodata section by default. // When linking a shared object, some conceptually "read only" types need to @@ -84,55 +86,57 @@ const ( SGCBITSRELRO SRODATARELRO SFUNCTABRELRO - SELFRELROSECT - SMACHORELROSECT - // Part of .data.rel.ro if it exists, otherwise part of .rodata. - STYPELINK - SITABLINK - SSYMTAB - SPCLNTAB + SELFRELROSECT // ELF-specific read-only relocatable: PLT, etc. + SMACHORELROSECT // Mach-O specific read-only relocatable. - // Writable sections. + STYPELINK // Type links. + SITABLINK // Itab links. + + // Allocated writable segment.
SFirstWritable - SBUILDINFO - SFIPSINFO - SELFSECT - SMACHO - SMACHOGOT - SWINDOWS - SELFGOT - SNOPTRDATA - SNOPTRDATAFIPSSTART - SNOPTRDATAFIPS - SNOPTRDATAFIPSEND - SNOPTRDATAEND - SINITARR - SDATA - SDATAFIPSSTART - SDATAFIPS - SDATAFIPSEND - SDATAEND - SXCOFFTOC - SBSS - SNOPTRBSS - SLIBFUZZER_8BIT_COUNTER - SCOVERAGE_COUNTER - SCOVERAGE_AUXVAR - STLSBSS - SXREF - SMACHOSYMSTR - SMACHOSYMTAB - SMACHOINDIRECTPLT - SMACHOINDIRECTGOT - SFILEPATH - SDYNIMPORT - SHOSTOBJ - SUNDEFEXT // Undefined symbol for resolution by external linker + SBUILDINFO // debug/buildinfo data (why is this writable?). + SFIPSINFO // go:fipsinfo aka crypto/internal/fips140/check.Linkinfo (why is this writable?). + SELFSECT // .got.plt, .plt, .dynamic where appropriate. + SMACHO // Used only for .llvmasm? + SMACHOGOT // Mach-O GOT. + SWINDOWS // Windows dynamic symbols. + SELFGOT // Writable ELF GOT section. + SNOPTRDATA // Data with no heap pointers. + SNOPTRDATAFIPSSTART // Start of FIPS non-pointer writable data. + SNOPTRDATAFIPS // FIPS non-pointer writable data. + SNOPTRDATAFIPSEND // End of FIPS non-pointer writable data. + SNOPTRDATAEND // End of data with no heap pointers. + SINITARR // ELF .init_array section. + SDATA // Data that may have heap pointers. + SDATAFIPSSTART // Start of FIPS writable data. + SDATAFIPS // FIPS writable data. + SDATAFIPSEND // End of FIPS writable data. + SDATAEND // End of data that may have heap pointers. + SXCOFFTOC // AIX TOC entries. - // Sections for debugging information + // Allocated zero-initialized segment. + SBSS // Zeroed data that may have heap pointers. + SNOPTRBSS // Zeroed data with no heap pointers. + SLIBFUZZER_8BIT_COUNTER // Fuzzer counters. + SCOVERAGE_COUNTER // Coverage counters. + SCOVERAGE_AUXVAR // Compiler generated coverage symbols. + STLSBSS // Thread-local zeroed data. + + // Unallocated segment. + SFirstUnallocated + SXREF // Reference from non-Go object file. + SMACHOSYMSTR // Mach-O string table. + SMACHOSYMTAB // Mach-O symbol table. + SMACHOINDIRECTPLT // Mach-O indirect PLT. + SMACHOINDIRECTGOT // Mach-O indirect GOT. + SDYNIMPORT // Reference to symbol defined in shared library. + SHOSTOBJ // Symbol defined in non-Go object file. + SUNDEFEXT // Undefined symbol for resolution by external linker. + + // Unallocated DWARF debugging segment. SDWARFSECT - // DWARF symbol types + // DWARF symbol types created by compiler or linker. SDWARFCUINFO SDWARFCONST SDWARFFCN @@ -144,9 +148,9 @@ const ( SDWARFLINES SDWARFADDR - // SEH symbol types - SSEHUNWINDINFO - SSEHSECT + // SEH symbol types. These are probably allocated at run time. + SSEHUNWINDINFO // Compiler generated Windows SEH info. + SSEHSECT // Windows SEH data.
) // AbiSymKindToSymKind maps values read from object files (which are diff --git a/src/cmd/link/internal/sym/symkind_string.go b/src/cmd/link/internal/sym/symkind_string.go index 5395c9571ba..019e7c746a6 100644 --- a/src/cmd/link/internal/sym/symkind_string.go +++ b/src/cmd/link/internal/sym/symkind_string.go @@ -27,73 +27,72 @@ func _() { _ = x[SRODATAFIPSEND-16] _ = x[SRODATAEND-17] _ = x[SFUNCTAB-18] - _ = x[SELFROSECT-19] - _ = x[STYPERELRO-20] - _ = x[SSTRINGRELRO-21] - _ = x[SGOSTRINGRELRO-22] - _ = x[SGOFUNCRELRO-23] - _ = x[SGCBITSRELRO-24] - _ = x[SRODATARELRO-25] - _ = x[SFUNCTABRELRO-26] - _ = x[SELFRELROSECT-27] - _ = x[SMACHORELROSECT-28] - _ = x[STYPELINK-29] - _ = x[SITABLINK-30] - _ = x[SSYMTAB-31] - _ = x[SPCLNTAB-32] - _ = x[SFirstWritable-33] - _ = x[SBUILDINFO-34] - _ = x[SFIPSINFO-35] - _ = x[SELFSECT-36] - _ = x[SMACHO-37] - _ = x[SMACHOGOT-38] - _ = x[SWINDOWS-39] - _ = x[SELFGOT-40] - _ = x[SNOPTRDATA-41] - _ = x[SNOPTRDATAFIPSSTART-42] - _ = x[SNOPTRDATAFIPS-43] - _ = x[SNOPTRDATAFIPSEND-44] - _ = x[SNOPTRDATAEND-45] - _ = x[SINITARR-46] - _ = x[SDATA-47] - _ = x[SDATAFIPSSTART-48] - _ = x[SDATAFIPS-49] - _ = x[SDATAFIPSEND-50] - _ = x[SDATAEND-51] - _ = x[SXCOFFTOC-52] - _ = x[SBSS-53] - _ = x[SNOPTRBSS-54] - _ = x[SLIBFUZZER_8BIT_COUNTER-55] - _ = x[SCOVERAGE_COUNTER-56] - _ = x[SCOVERAGE_AUXVAR-57] - _ = x[STLSBSS-58] + _ = x[SPCLNTAB-19] + _ = x[SELFROSECT-20] + _ = x[STYPERELRO-21] + _ = x[SSTRINGRELRO-22] + _ = x[SGOSTRINGRELRO-23] + _ = x[SGOFUNCRELRO-24] + _ = x[SGCBITSRELRO-25] + _ = x[SRODATARELRO-26] + _ = x[SFUNCTABRELRO-27] + _ = x[SELFRELROSECT-28] + _ = x[SMACHORELROSECT-29] + _ = x[STYPELINK-30] + _ = x[SITABLINK-31] + _ = x[SFirstWritable-32] + _ = x[SBUILDINFO-33] + _ = x[SFIPSINFO-34] + _ = x[SELFSECT-35] + _ = x[SMACHO-36] + _ = x[SMACHOGOT-37] + _ = x[SWINDOWS-38] + _ = x[SELFGOT-39] + _ = x[SNOPTRDATA-40] + _ = x[SNOPTRDATAFIPSSTART-41] + _ = x[SNOPTRDATAFIPS-42] + _ = x[SNOPTRDATAFIPSEND-43] + _ = x[SNOPTRDATAEND-44] + _ = x[SINITARR-45] + _ = x[SDATA-46] + _ = x[SDATAFIPSSTART-47] + _ = x[SDATAFIPS-48] + _ = x[SDATAFIPSEND-49] + _ = x[SDATAEND-50] + _ = x[SXCOFFTOC-51] + _ = x[SBSS-52] + _ = x[SNOPTRBSS-53] + _ = x[SLIBFUZZER_8BIT_COUNTER-54] + _ = x[SCOVERAGE_COUNTER-55] + _ = x[SCOVERAGE_AUXVAR-56] + _ = x[STLSBSS-57] + _ = x[SFirstUnallocated-58] _ = x[SXREF-59] _ = x[SMACHOSYMSTR-60] _ = x[SMACHOSYMTAB-61] _ = x[SMACHOINDIRECTPLT-62] _ = x[SMACHOINDIRECTGOT-63] - _ = x[SFILEPATH-64] - _ = x[SDYNIMPORT-65] - _ = x[SHOSTOBJ-66] - _ = x[SUNDEFEXT-67] - _ = x[SDWARFSECT-68] - _ = x[SDWARFCUINFO-69] - _ = x[SDWARFCONST-70] - _ = x[SDWARFFCN-71] - _ = x[SDWARFABSFCN-72] - _ = x[SDWARFTYPE-73] - _ = x[SDWARFVAR-74] - _ = x[SDWARFRANGE-75] - _ = x[SDWARFLOC-76] - _ = x[SDWARFLINES-77] - _ = x[SDWARFADDR-78] - _ = x[SSEHUNWINDINFO-79] - _ = x[SSEHSECT-80] + _ = x[SDYNIMPORT-64] + _ = x[SHOSTOBJ-65] + _ = x[SUNDEFEXT-66] + _ = x[SDWARFSECT-67] + _ = x[SDWARFCUINFO-68] + _ = x[SDWARFCONST-69] + _ = x[SDWARFFCN-70] + _ = x[SDWARFABSFCN-71] + _ = x[SDWARFTYPE-72] + _ = x[SDWARFVAR-73] + _ = x[SDWARFRANGE-74] + _ = x[SDWARFLOC-75] + _ = x[SDWARFLINES-76] + _ = x[SDWARFADDR-77] + _ = x[SSEHUNWINDINFO-78] + _ = x[SSEHSECT-79] } -const _SymKind_name = 
"SxxxSTEXTSTEXTFIPSSTARTSTEXTFIPSSTEXTFIPSENDSTEXTENDSELFRXSECTSMACHOPLTSTYPESSTRINGSGOSTRINGSGOFUNCSGCBITSSRODATASRODATAFIPSSTARTSRODATAFIPSSRODATAFIPSENDSRODATAENDSFUNCTABSELFROSECTSTYPERELROSSTRINGRELROSGOSTRINGRELROSGOFUNCRELROSGCBITSRELROSRODATARELROSFUNCTABRELROSELFRELROSECTSMACHORELROSECTSTYPELINKSITABLINKSSYMTABSPCLNTABSFirstWritableSBUILDINFOSFIPSINFOSELFSECTSMACHOSMACHOGOTSWINDOWSSELFGOTSNOPTRDATASNOPTRDATAFIPSSTARTSNOPTRDATAFIPSSNOPTRDATAFIPSENDSNOPTRDATAENDSINITARRSDATASDATAFIPSSTARTSDATAFIPSSDATAFIPSENDSDATAENDSXCOFFTOCSBSSSNOPTRBSSSLIBFUZZER_8BIT_COUNTERSCOVERAGE_COUNTERSCOVERAGE_AUXVARSTLSBSSSXREFSMACHOSYMSTRSMACHOSYMTABSMACHOINDIRECTPLTSMACHOINDIRECTGOTSFILEPATHSDYNIMPORTSHOSTOBJSUNDEFEXTSDWARFSECTSDWARFCUINFOSDWARFCONSTSDWARFFCNSDWARFABSFCNSDWARFTYPESDWARFVARSDWARFRANGESDWARFLOCSDWARFLINESSDWARFADDRSSEHUNWINDINFOSSEHSECT" +const _SymKind_name = "SxxxSTEXTSTEXTFIPSSTARTSTEXTFIPSSTEXTFIPSENDSTEXTENDSELFRXSECTSMACHOPLTSTYPESSTRINGSGOSTRINGSGOFUNCSGCBITSSRODATASRODATAFIPSSTARTSRODATAFIPSSRODATAFIPSENDSRODATAENDSFUNCTABSPCLNTABSELFROSECTSTYPERELROSSTRINGRELROSGOSTRINGRELROSGOFUNCRELROSGCBITSRELROSRODATARELROSFUNCTABRELROSELFRELROSECTSMACHORELROSECTSTYPELINKSITABLINKSFirstWritableSBUILDINFOSFIPSINFOSELFSECTSMACHOSMACHOGOTSWINDOWSSELFGOTSNOPTRDATASNOPTRDATAFIPSSTARTSNOPTRDATAFIPSSNOPTRDATAFIPSENDSNOPTRDATAENDSINITARRSDATASDATAFIPSSTARTSDATAFIPSSDATAFIPSENDSDATAENDSXCOFFTOCSBSSSNOPTRBSSSLIBFUZZER_8BIT_COUNTERSCOVERAGE_COUNTERSCOVERAGE_AUXVARSTLSBSSSFirstUnallocatedSXREFSMACHOSYMSTRSMACHOSYMTABSMACHOINDIRECTPLTSMACHOINDIRECTGOTSDYNIMPORTSHOSTOBJSUNDEFEXTSDWARFSECTSDWARFCUINFOSDWARFCONSTSDWARFFCNSDWARFABSFCNSDWARFTYPESDWARFVARSDWARFRANGESDWARFLOCSDWARFLINESSDWARFADDRSSEHUNWINDINFOSSEHSECT" -var _SymKind_index = [...]uint16{0, 4, 9, 23, 32, 44, 52, 62, 71, 76, 83, 92, 99, 106, 113, 129, 140, 154, 164, 172, 182, 192, 204, 218, 230, 242, 254, 267, 280, 295, 304, 313, 320, 328, 342, 352, 361, 369, 375, 384, 392, 399, 409, 428, 442, 459, 472, 480, 485, 499, 508, 520, 528, 537, 541, 550, 573, 590, 606, 613, 618, 630, 642, 659, 676, 685, 695, 703, 712, 722, 734, 745, 754, 766, 776, 785, 796, 805, 816, 826, 840, 848} +var _SymKind_index = [...]uint16{0, 4, 9, 23, 32, 44, 52, 62, 71, 76, 83, 92, 99, 106, 113, 129, 140, 154, 164, 172, 180, 190, 200, 212, 226, 238, 250, 262, 275, 288, 303, 312, 321, 335, 345, 354, 362, 368, 377, 385, 392, 402, 421, 435, 452, 465, 473, 478, 492, 501, 513, 521, 530, 534, 543, 566, 583, 599, 606, 623, 628, 640, 652, 669, 686, 696, 704, 713, 723, 735, 746, 755, 767, 777, 786, 797, 806, 817, 827, 841, 849} func (i SymKind) String() string { if i >= SymKind(len(_SymKind_index)-1) { diff --git a/src/cmd/link/internal/wasm/asm.go b/src/cmd/link/internal/wasm/asm.go index d03102cc6be..65f79c80120 100644 --- a/src/cmd/link/internal/wasm/asm.go +++ b/src/cmd/link/internal/wasm/asm.go @@ -127,7 +127,6 @@ func asmb(ctxt *ld.Link, ldr *loader.Loader) { ldr.SymSect(ldr.Lookup("runtime.rodata", 0)), ldr.SymSect(ldr.Lookup("runtime.typelink", 0)), ldr.SymSect(ldr.Lookup("runtime.itablink", 0)), - ldr.SymSect(ldr.Lookup("runtime.symtab", 0)), ldr.SymSect(ldr.Lookup("runtime.pclntab", 0)), ldr.SymSect(ldr.Lookup("runtime.noptrdata", 0)), ldr.SymSect(ldr.Lookup("runtime.data", 0)), @@ -302,11 +301,11 @@ func writeTypeSec(ctxt *ld.Link, types []*wasmFuncType) { ctxt.Out.WriteByte(0x60) // functype writeUleb128(ctxt.Out, uint64(len(t.Params))) for _, v := range t.Params { - ctxt.Out.WriteByte(byte(v)) + ctxt.Out.WriteByte(v) } writeUleb128(ctxt.Out, uint64(len(t.Results))) for _, v 
:= range t.Results { - ctxt.Out.WriteByte(byte(v)) + ctxt.Out.WriteByte(v) } } diff --git a/src/cmd/link/link_test.go b/src/cmd/link/link_test.go index 0125ba8e0f5..31822d21f39 100644 --- a/src/cmd/link/link_test.go +++ b/src/cmd/link/link_test.go @@ -1532,11 +1532,13 @@ func TestFlagS(t *testing.T) { } cmd = testenv.Command(t, testenv.GoToolPath(t), "tool", "nm", exe) out, err = cmd.CombinedOutput() - if err != nil && !errors.As(err, new(*exec.ExitError)) { - // Error exit is fine as it may have no symbols. - // On darwin we need to emit dynamic symbol references so it - // actually has some symbols, and nm succeeds. - t.Errorf("(mode=%s) go tool nm failed: %v\n%s", mode, err, out) + if err != nil { + if _, ok := errors.AsType[*exec.ExitError](err); !ok { + // Error exit is fine as it may have no symbols. + // On darwin we need to emit dynamic symbol references so it + // actually has some symbols, and nm succeeds. + t.Errorf("(mode=%s) go tool nm failed: %v\n%s", mode, err, out) + } + } for _, s := range syms { if bytes.Contains(out, []byte(s)) { diff --git a/src/cmd/trace/gstate.go b/src/cmd/trace/gstate.go index 9c3da66217f..46638a1cdbe 100644 --- a/src/cmd/trace/gstate.go +++ b/src/cmd/trace/gstate.go @@ -147,7 +147,7 @@ func (gs *gState[R]) start(ts trace.Time, resource R, ctx *traceContext) { Name: gs.startCause.name, Start: ctx.elapsed(gs.startCause.time), End: ctx.elapsed(ts), - FromResource: uint64(gs.startCause.resource), + FromResource: gs.startCause.resource, ToResource: uint64(resource), FromStack: ctx.Stack(viewerFrames(gs.startCause.stack)), }) diff --git a/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/arg.go b/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/arg.go index 7898c27366a..2b96b416f60 100644 --- a/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/arg.go +++ b/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/arg.go @@ -22,13 +22,23 @@ package riscv64asm // // - arg_fs3: a floating point register rs3 encoded in rs3[31:27] field // +// - arg_vd: a vector register vd encoded in vd[11:7] field +// +// - arg_vm: indicates the presence of the mask register, encoded in vm[25] field +// +// - arg_vs1: a vector register vs1 encoded in vs1[19:15] field +// +// - arg_vs2: a vector register vs2 encoded in vs2[24:20] field +// +// - arg_vs3: a vector register vs3 encoded in vs3[11:7] field +// // - arg_csr: a control status register encoded in csr[31:20] field // // - arg_rs1_mem: source register with offset in load commands // // - arg_rs1_store: source register with offset in store commands // -// - arg_rs1_amo: source register with offset in atomic commands +// - arg_rs1_ptr: source register used as an address with no offset in atomic and vector commands // // - arg_pred: predecessor memory ordering information encoded in pred[27:24] field // For details, please refer to chapter 2.7 of ISA manual volume 1 @@ -46,6 +56,14 @@ package riscv64asm // // - arg_imm20: an U-type immediate encoded in imm20[31:12] field // +// - arg_simm5: a 5 bit signed immediate encoded in imm[19:15] field +// +// - arg_zimm5: a 5 bit unsigned immediate encoded in imm[19:15] field +// +// - arg_vtype_zimm10: a 10 bit unsigned immediate encoded in vtypei[29:20] field +// +// - arg_vtype_zimm11: an 11 bit unsigned immediate encoded in vtypei[30:20] field +// // - arg_jimm20: a J-type immediate encoded in jimm20[31:12] field // // - arg_shamt5: a shift amount encoded in shamt5[24:20] field @@ -65,9 +83,14 @@ const ( arg_fs1 arg_fs2 arg_fs3 + arg_vd + arg_vm + arg_vs1 + arg_vs2 + arg_vs3
arg_csr - arg_rs1_amo + arg_rs1_ptr arg_rs1_mem arg_rs1_store @@ -77,6 +100,10 @@ const ( arg_zimm arg_imm12 arg_simm12 + arg_simm5 + arg_zimm5 + arg_vtype_zimm10 + arg_vtype_zimm11 arg_bimm12 arg_imm20 arg_jimm20 diff --git a/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/decode.go b/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/decode.go index d78fef9e396..390edfa936c 100644 --- a/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/decode.go +++ b/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/decode.go @@ -63,16 +63,23 @@ Search: // Decode args. var args Args - for j, aop := range f.args { + k := 0 + for _, aop := range f.args { if aop == 0 { break } arg := decodeArg(aop, x, i) - if arg == nil && f.op != C_NOP { - // Cannot decode argument. - continue Search + if arg == nil { + if aop == arg_vm { + continue + } + if f.op != C_NOP { + // Cannot decode argument. + continue Search + } } - args[j] = arg + args[k] = arg + k++ } if length == 2 { @@ -119,8 +126,27 @@ func decodeArg(aop argType, x uint32, index int) Arg { case arg_fs3: return F0 + Reg((x>>27)&((1<<5)-1)) - case arg_rs1_amo: - return AmoReg{X0 + Reg((x>>15)&((1<<5)-1))} + case arg_vd: + return V0 + Reg((x>>7)&((1<<5)-1)) + + case arg_vm: + if x&(1<<25) == 0 { + return V0 + } else { + return nil + } + + case arg_vs1: + return V0 + Reg((x>>15)&((1<<5)-1)) + + case arg_vs2: + return V0 + Reg((x>>20)&((1<<5)-1)) + + case arg_vs3: + return V0 + Reg((x>>7)&((1<<5)-1)) + + case arg_rs1_ptr: + return RegPtr{X0 + Reg((x>>15)&((1<<5)-1))} case arg_rs1_mem: imm := x >> 20 @@ -198,6 +224,26 @@ func decodeArg(aop argType, x uint32, index int) Arg { } return Simm{int32(imm), true, 13} + case arg_simm5: + imm := x << 12 >> 27 + // Sign-extend + if imm>>uint32(5-1) == 1 { + imm |= 0x7ffffff << 5 + } + return Simm{int32(imm), true, 5} + + case arg_zimm5: + imm := x << 12 >> 27 + return Uimm{imm, true} + + case arg_vtype_zimm10: + imm := x << 2 >> 22 + return VType(imm) + + case arg_vtype_zimm11: + imm := x << 1 >> 21 + return VType(imm) + case arg_rd_p, arg_rs2_p: return X8 + Reg((x>>2)&((1<<3)-1)) diff --git a/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/gnu.go b/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/gnu.go index 3ee0449640a..d8febcc0394 100644 --- a/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/gnu.go +++ b/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/gnu.go @@ -12,15 +12,23 @@ import ( // This form typically matches the syntax defined in the RISC-V Instruction Set Manual. 
See // https://github.com/riscv/riscv-isa-manual/releases/download/Ratified-IMAFDQC/riscv-spec-20191213.pdf func GNUSyntax(inst Inst) string { - op := strings.ToLower(inst.Op.String()) + hasVectorArg := false var args []string for _, a := range inst.Args { if a == nil { break } args = append(args, strings.ToLower(a.String())) + if r, ok := a.(Reg); ok { + hasVectorArg = hasVectorArg || (r >= V0 && r <= V31) + } } + if hasVectorArg { + return gnuVectorOp(inst, args) + } + + op := strings.ToLower(inst.Op.String()) switch inst.Op { case ADDI, ADDIW, ANDI, ORI, SLLI, SLLIW, SRAI, SRAIW, SRLI, SRLIW, XORI: if inst.Op == ADDI { @@ -324,6 +332,12 @@ func GNUSyntax(inst Inst) string { args[1] = args[2] args = args[:len(args)-1] } + + case VSETVLI, VSETIVLI: + args[0], args[2] = args[2], strings.ReplaceAll(args[0], " ", "") + + case VSETVL: + args[0], args[2] = args[2], args[0] } if args != nil { @@ -331,3 +345,60 @@ func GNUSyntax(inst Inst) string { } return op } + +func gnuVectorOp(inst Inst, args []string) string { + // Instruction is either a vector load, store or an arithmetic + // operation. We can use the inst.Enc to figure out which. Whatever + // it is, it has at least one argument. + + rawArgs := inst.Args[:] + + var mask string + var op string + if inst.Enc&(1<<25) == 0 { + if implicitMask(inst.Op) { + mask = "v0" + } else { + mask = "v0.t" + args = args[1:] + rawArgs = rawArgs[1:] + } + } + + if len(args) > 1 { + if inst.Enc&0x7f == 0x7 || inst.Enc&0x7f == 0x27 { + // It's a load or a store + if len(args) >= 2 { + args[0], args[len(args)-1] = args[len(args)-1], args[0] + } + op = pseudoRVVLoad(inst.Op) + } else { + // It's an arithmetic instruction + + op, args = pseudoRVVArith(inst.Op, rawArgs, args) + + if len(args) == 3 { + if imaOrFma(inst.Op) { + args[0], args[2] = args[2], args[0] + } else { + args[0], args[1], args[2] = args[2], args[0], args[1] + } + } else if len(args) == 2 { + args[0], args[1] = args[1], args[0] + } + } + } + + // The mask is always the last argument + + if mask != "" { + args = append(args, mask) + } + + if op == "" { + op = inst.Op.String() + } + op = strings.ToLower(op) + + return op + " " + strings.Join(args, ",") +} diff --git a/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/inst.go b/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/inst.go index 3c13567cb04..51dc5bcded1 100644 --- a/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/inst.go +++ b/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/inst.go @@ -59,7 +59,7 @@ func (i Inst) String() string { type Reg uint16 const ( - // General-purpose register + // General-purpose registers X0 Reg = iota X1 X2 @@ -93,7 +93,7 @@ const ( X30 X31 - //Float point register + // Floating point registers F0 F1 F2 @@ -126,6 +126,40 @@ const ( F29 F30 F31 + + // Vector registers + V0 + V1 + V2 + V3 + V4 + V5 + V6 + V7 + V8 + V9 + V10 + V11 + V12 + V13 + V14 + V15 + V16 + V17 + V18 + V19 + V20 + V21 + V22 + V23 + V24 + V25 + V26 + V27 + V28 + V29 + V30 + V31 ) func (r Reg) String() string { @@ -136,6 +170,9 @@ func (r Reg) String() string { case r >= F0 && r <= F31: return fmt.Sprintf("f%d", r-F0) + case r >= V0 && r <= V31: + return fmt.Sprintf("v%d", r-V0) + default: return fmt.Sprintf("Unknown(%d)", r) } @@ -455,13 +492,13 @@ func (si Simm) String() string { return fmt.Sprintf("%#x", si.Imm) } -// An AmoReg is an atomic address register used in AMO instructions -type AmoReg struct { +// A RegPtr is an address register with no offset +type RegPtr struct { reg Reg // Avoid promoted String method } -func 
(amoReg AmoReg) String() string { - return fmt.Sprintf("(%s)", amoReg.reg) +func (regPtr RegPtr) String() string { + return fmt.Sprintf("(%s)", regPtr.reg) } // A RegOffset is a register with offset value @@ -493,3 +530,21 @@ func (memOrder MemOrder) String() string { } return str } + +// A VType represents the vtype field of VSETIVLI and VSETVLI instructions +type VType uint32 + +var vlmulName = []string{"M1", "M2", "M4", "M8", "", "MF8", "MF4", "MF2"} +var vsewName = []string{"E8", "E16", "E32", "E64", "", "", "", ""} +var vtaName = []string{"TU", "TA"} +var vmaName = []string{"MU", "MA"} + +func (vtype VType) String() string { + + vlmul := vtype & 0x7 + vsew := (vtype >> 3) & 0x7 + vta := (vtype >> 6) & 0x1 + vma := (vtype >> 7) & 0x1 + + return fmt.Sprintf("%s, %s, %s, %s", vsewName[vsew], vlmulName[vlmul], vtaName[vta], vmaName[vma]) +} diff --git a/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/plan9x.go b/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/plan9x.go index 367122d940f..b68deb39a3e 100644 --- a/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/plan9x.go +++ b/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/plan9x.go @@ -26,12 +26,20 @@ func GoSyntax(inst Inst, pc uint64, symname func(uint64) (string, uint64), text symname = func(uint64) (string, uint64) { return "", 0 } } + hasVectorArg := false var args []string for _, a := range inst.Args { if a == nil { break } args = append(args, plan9Arg(&inst, pc, symname, a)) + if r, ok := a.(Reg); ok { + hasVectorArg = hasVectorArg || (r >= V0 && r <= V31) + } + } + + if hasVectorArg { + return plan9VectorOp(inst, args) } op := inst.Op.String() @@ -180,6 +188,22 @@ func GoSyntax(inst Inst, pc uint64, symname func(uint64) (string, uint64), text FNMSUB_D, FNMSUB_H, FNMSUB_Q, FNMSUB_S: args[1], args[3] = args[3], args[1] + case FMV_W_X: + if inst.Args[1].(Reg) == X0 { + args[1] = "$(0.0)" + } + fallthrough + case FMV_X_W: + op = "MOVF" + + case FMV_D_X: + if inst.Args[1].(Reg) == X0 { + args[1] = "$(0.0)" + } + fallthrough + case FMV_X_D: + op = "MOVD" + case FSGNJ_S: if inst.Args[2] == inst.Args[1] { op = "MOVF" @@ -251,13 +275,13 @@ func GoSyntax(inst Inst, pc uint64, symname func(uint64) (string, uint64), text case FLW, FSW: op = "MOVF" - if inst.Op == FLW { + if inst.Op == FSW { args[0], args[1] = args[1], args[0] } case FLD, FSD: op = "MOVD" - if inst.Op == FLD { + if inst.Op == FSD { args[0], args[1] = args[1], args[0] } @@ -317,6 +341,12 @@ func GoSyntax(inst Inst, pc uint64, symname func(uint64) (string, uint64), text } else { args[0], args[1] = args[1], args[0] } + + case VSETVLI, VSETIVLI: + args[0], args[1], args[2] = args[2], args[0], args[1] + + case VSETVL: + args[0], args[2] = args[2], args[0] } // Reverse args, placing dest last. @@ -354,13 +384,6 @@ func plan9Arg(inst *Inst, pc uint64, symname func(uint64) (string, uint64), arg } return fmt.Sprintf("$%d", int32(imm)) - case Reg: - if a <= 31 { - return fmt.Sprintf("X%d", a) - } else { - return fmt.Sprintf("F%d", a-32) - } - case RegOffset: if a.Ofs.Imm == 0 { return fmt.Sprintf("(X%d)", a.OfsReg) @@ -368,10 +391,66 @@ func plan9Arg(inst *Inst, pc uint64, symname func(uint64) (string, uint64), arg return fmt.Sprintf("%s(X%d)", a.Ofs.String(), a.OfsReg) } - case AmoReg: + case RegPtr: return fmt.Sprintf("(X%d)", a.reg) default: return strings.ToUpper(arg.String()) } } + +func plan9VectorOp(inst Inst, args []string) string { + // Instruction is either a vector load, store or an arithmetic + // operation. We can use the inst.Enc to figure out which. 
Whatever + // it is, it has at least one argument. + + var op string + rawArgs := inst.Args[:] + + var mask string + if inst.Enc&(1<<25) == 0 { + mask = "V0" + if !implicitMask(inst.Op) { + args = args[1:] + rawArgs = rawArgs[1:] + } + } + + if len(args) > 1 { + if inst.Enc&0x7f == 0x7 { + // It's a load + if len(args) == 3 { + args[0], args[1] = args[1], args[0] + } + op = pseudoRVVLoad(inst.Op) + } else if inst.Enc&0x7f == 0x27 { + // It's a store + if len(args) == 3 { + args[0], args[1], args[2] = args[2], args[0], args[1] + } else if len(args) == 2 { + args[0], args[1] = args[1], args[0] + } + } else { + // It's an arithmetic instruction + + op, args = pseudoRVVArith(inst.Op, rawArgs, args) + + if len(args) == 3 && !imaOrFma(inst.Op) { + args[0], args[1] = args[1], args[0] + } + } + } + + // The mask is always the penultimate argument + + if mask != "" { + args = append(args[:len(args)-1], mask, args[len(args)-1]) + } + + if op == "" { + op = inst.Op.String() + } + + op = strings.Replace(op, ".", "", -1) + return op + " " + strings.Join(args, ", ") +} diff --git a/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/tables.go b/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/tables.go index 3e5db415e71..2a951f92c6b 100644 --- a/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/tables.go +++ b/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/tables.go @@ -116,6 +116,8 @@ const ( CSRRWI CTZ CTZW + CZERO_EQZ + CZERO_NEZ C_ADD C_ADDI C_ADDI16SP @@ -369,6 +371,633 @@ const ( SUB SUBW SW + VAADDU_VV + VAADDU_VX + VAADD_VV + VAADD_VX + VADC_VIM + VADC_VVM + VADC_VXM + VADD_VI + VADD_VV + VADD_VX + VAND_VI + VAND_VV + VAND_VX + VASUBU_VV + VASUBU_VX + VASUB_VV + VASUB_VX + VCOMPRESS_VM + VCPOP_M + VDIVU_VV + VDIVU_VX + VDIV_VV + VDIV_VX + VFADD_VF + VFADD_VV + VFCLASS_V + VFCVT_F_XU_V + VFCVT_F_X_V + VFCVT_RTZ_XU_F_V + VFCVT_RTZ_X_F_V + VFCVT_XU_F_V + VFCVT_X_F_V + VFDIV_VF + VFDIV_VV + VFIRST_M + VFMACC_VF + VFMACC_VV + VFMADD_VF + VFMADD_VV + VFMAX_VF + VFMAX_VV + VFMERGE_VFM + VFMIN_VF + VFMIN_VV + VFMSAC_VF + VFMSAC_VV + VFMSUB_VF + VFMSUB_VV + VFMUL_VF + VFMUL_VV + VFMV_F_S + VFMV_S_F + VFMV_V_F + VFNCVT_F_F_W + VFNCVT_F_XU_W + VFNCVT_F_X_W + VFNCVT_ROD_F_F_W + VFNCVT_RTZ_XU_F_W + VFNCVT_RTZ_X_F_W + VFNCVT_XU_F_W + VFNCVT_X_F_W + VFNMACC_VF + VFNMACC_VV + VFNMADD_VF + VFNMADD_VV + VFNMSAC_VF + VFNMSAC_VV + VFNMSUB_VF + VFNMSUB_VV + VFRDIV_VF + VFREC7_V + VFREDMAX_VS + VFREDMIN_VS + VFREDOSUM_VS + VFREDUSUM_VS + VFRSQRT7_V + VFRSUB_VF + VFSGNJN_VF + VFSGNJN_VV + VFSGNJX_VF + VFSGNJX_VV + VFSGNJ_VF + VFSGNJ_VV + VFSLIDE1DOWN_VF + VFSLIDE1UP_VF + VFSQRT_V + VFSUB_VF + VFSUB_VV + VFWADD_VF + VFWADD_VV + VFWADD_WF + VFWADD_WV + VFWCVT_F_F_V + VFWCVT_F_XU_V + VFWCVT_F_X_V + VFWCVT_RTZ_XU_F_V + VFWCVT_RTZ_X_F_V + VFWCVT_XU_F_V + VFWCVT_X_F_V + VFWMACC_VF + VFWMACC_VV + VFWMSAC_VF + VFWMSAC_VV + VFWMUL_VF + VFWMUL_VV + VFWNMACC_VF + VFWNMACC_VV + VFWNMSAC_VF + VFWNMSAC_VV + VFWREDOSUM_VS + VFWREDUSUM_VS + VFWSUB_VF + VFWSUB_VV + VFWSUB_WF + VFWSUB_WV + VID_V + VIOTA_M + VL1RE16_V + VL1RE32_V + VL1RE64_V + VL1RE8_V + VL2RE16_V + VL2RE32_V + VL2RE64_V + VL2RE8_V + VL4RE16_V + VL4RE32_V + VL4RE64_V + VL4RE8_V + VL8RE16_V + VL8RE32_V + VL8RE64_V + VL8RE8_V + VLE16FF_V + VLE16_V + VLE32FF_V + VLE32_V + VLE64FF_V + VLE64_V + VLE8FF_V + VLE8_V + VLM_V + VLOXEI16_V + VLOXEI32_V + VLOXEI64_V + VLOXEI8_V + VLOXSEG2EI16_V + VLOXSEG2EI32_V + VLOXSEG2EI64_V + VLOXSEG2EI8_V + VLOXSEG3EI16_V + VLOXSEG3EI32_V + VLOXSEG3EI64_V + VLOXSEG3EI8_V + VLOXSEG4EI16_V + VLOXSEG4EI32_V + VLOXSEG4EI64_V + 
VLOXSEG4EI8_V + VLOXSEG5EI16_V + VLOXSEG5EI32_V + VLOXSEG5EI64_V + VLOXSEG5EI8_V + VLOXSEG6EI16_V + VLOXSEG6EI32_V + VLOXSEG6EI64_V + VLOXSEG6EI8_V + VLOXSEG7EI16_V + VLOXSEG7EI32_V + VLOXSEG7EI64_V + VLOXSEG7EI8_V + VLOXSEG8EI16_V + VLOXSEG8EI32_V + VLOXSEG8EI64_V + VLOXSEG8EI8_V + VLSE16_V + VLSE32_V + VLSE64_V + VLSE8_V + VLSEG2E16FF_V + VLSEG2E16_V + VLSEG2E32FF_V + VLSEG2E32_V + VLSEG2E64FF_V + VLSEG2E64_V + VLSEG2E8FF_V + VLSEG2E8_V + VLSEG3E16FF_V + VLSEG3E16_V + VLSEG3E32FF_V + VLSEG3E32_V + VLSEG3E64FF_V + VLSEG3E64_V + VLSEG3E8FF_V + VLSEG3E8_V + VLSEG4E16FF_V + VLSEG4E16_V + VLSEG4E32FF_V + VLSEG4E32_V + VLSEG4E64FF_V + VLSEG4E64_V + VLSEG4E8FF_V + VLSEG4E8_V + VLSEG5E16FF_V + VLSEG5E16_V + VLSEG5E32FF_V + VLSEG5E32_V + VLSEG5E64FF_V + VLSEG5E64_V + VLSEG5E8FF_V + VLSEG5E8_V + VLSEG6E16FF_V + VLSEG6E16_V + VLSEG6E32FF_V + VLSEG6E32_V + VLSEG6E64FF_V + VLSEG6E64_V + VLSEG6E8FF_V + VLSEG6E8_V + VLSEG7E16FF_V + VLSEG7E16_V + VLSEG7E32FF_V + VLSEG7E32_V + VLSEG7E64FF_V + VLSEG7E64_V + VLSEG7E8FF_V + VLSEG7E8_V + VLSEG8E16FF_V + VLSEG8E16_V + VLSEG8E32FF_V + VLSEG8E32_V + VLSEG8E64FF_V + VLSEG8E64_V + VLSEG8E8FF_V + VLSEG8E8_V + VLSSEG2E16_V + VLSSEG2E32_V + VLSSEG2E64_V + VLSSEG2E8_V + VLSSEG3E16_V + VLSSEG3E32_V + VLSSEG3E64_V + VLSSEG3E8_V + VLSSEG4E16_V + VLSSEG4E32_V + VLSSEG4E64_V + VLSSEG4E8_V + VLSSEG5E16_V + VLSSEG5E32_V + VLSSEG5E64_V + VLSSEG5E8_V + VLSSEG6E16_V + VLSSEG6E32_V + VLSSEG6E64_V + VLSSEG6E8_V + VLSSEG7E16_V + VLSSEG7E32_V + VLSSEG7E64_V + VLSSEG7E8_V + VLSSEG8E16_V + VLSSEG8E32_V + VLSSEG8E64_V + VLSSEG8E8_V + VLUXEI16_V + VLUXEI32_V + VLUXEI64_V + VLUXEI8_V + VLUXSEG2EI16_V + VLUXSEG2EI32_V + VLUXSEG2EI64_V + VLUXSEG2EI8_V + VLUXSEG3EI16_V + VLUXSEG3EI32_V + VLUXSEG3EI64_V + VLUXSEG3EI8_V + VLUXSEG4EI16_V + VLUXSEG4EI32_V + VLUXSEG4EI64_V + VLUXSEG4EI8_V + VLUXSEG5EI16_V + VLUXSEG5EI32_V + VLUXSEG5EI64_V + VLUXSEG5EI8_V + VLUXSEG6EI16_V + VLUXSEG6EI32_V + VLUXSEG6EI64_V + VLUXSEG6EI8_V + VLUXSEG7EI16_V + VLUXSEG7EI32_V + VLUXSEG7EI64_V + VLUXSEG7EI8_V + VLUXSEG8EI16_V + VLUXSEG8EI32_V + VLUXSEG8EI64_V + VLUXSEG8EI8_V + VMACC_VV + VMACC_VX + VMADC_VI + VMADC_VIM + VMADC_VV + VMADC_VVM + VMADC_VX + VMADC_VXM + VMADD_VV + VMADD_VX + VMANDN_MM + VMAND_MM + VMAXU_VV + VMAXU_VX + VMAX_VV + VMAX_VX + VMERGE_VIM + VMERGE_VVM + VMERGE_VXM + VMFEQ_VF + VMFEQ_VV + VMFGE_VF + VMFGT_VF + VMFLE_VF + VMFLE_VV + VMFLT_VF + VMFLT_VV + VMFNE_VF + VMFNE_VV + VMINU_VV + VMINU_VX + VMIN_VV + VMIN_VX + VMNAND_MM + VMNOR_MM + VMORN_MM + VMOR_MM + VMSBC_VV + VMSBC_VVM + VMSBC_VX + VMSBC_VXM + VMSBF_M + VMSEQ_VI + VMSEQ_VV + VMSEQ_VX + VMSGTU_VI + VMSGTU_VX + VMSGT_VI + VMSGT_VX + VMSIF_M + VMSLEU_VI + VMSLEU_VV + VMSLEU_VX + VMSLE_VI + VMSLE_VV + VMSLE_VX + VMSLTU_VV + VMSLTU_VX + VMSLT_VV + VMSLT_VX + VMSNE_VI + VMSNE_VV + VMSNE_VX + VMSOF_M + VMULHSU_VV + VMULHSU_VX + VMULHU_VV + VMULHU_VX + VMULH_VV + VMULH_VX + VMUL_VV + VMUL_VX + VMV1R_V + VMV2R_V + VMV4R_V + VMV8R_V + VMV_S_X + VMV_V_I + VMV_V_V + VMV_V_X + VMV_X_S + VMXNOR_MM + VMXOR_MM + VNCLIPU_WI + VNCLIPU_WV + VNCLIPU_WX + VNCLIP_WI + VNCLIP_WV + VNCLIP_WX + VNMSAC_VV + VNMSAC_VX + VNMSUB_VV + VNMSUB_VX + VNSRA_WI + VNSRA_WV + VNSRA_WX + VNSRL_WI + VNSRL_WV + VNSRL_WX + VOR_VI + VOR_VV + VOR_VX + VREDAND_VS + VREDMAXU_VS + VREDMAX_VS + VREDMINU_VS + VREDMIN_VS + VREDOR_VS + VREDSUM_VS + VREDXOR_VS + VREMU_VV + VREMU_VX + VREM_VV + VREM_VX + VRGATHEREI16_VV + VRGATHER_VI + VRGATHER_VV + VRGATHER_VX + VRSUB_VI + VRSUB_VX + VS1R_V + VS2R_V + VS4R_V + VS8R_V + VSADDU_VI + VSADDU_VV + VSADDU_VX + VSADD_VI + VSADD_VV + VSADD_VX 
+ VSBC_VVM + VSBC_VXM + VSE16_V + VSE32_V + VSE64_V + VSE8_V + VSETIVLI + VSETVL + VSETVLI + VSEXT_VF2 + VSEXT_VF4 + VSEXT_VF8 + VSLIDE1DOWN_VX + VSLIDE1UP_VX + VSLIDEDOWN_VI + VSLIDEDOWN_VX + VSLIDEUP_VI + VSLIDEUP_VX + VSLL_VI + VSLL_VV + VSLL_VX + VSMUL_VV + VSMUL_VX + VSM_V + VSOXEI16_V + VSOXEI32_V + VSOXEI64_V + VSOXEI8_V + VSOXSEG2EI16_V + VSOXSEG2EI32_V + VSOXSEG2EI64_V + VSOXSEG2EI8_V + VSOXSEG3EI16_V + VSOXSEG3EI32_V + VSOXSEG3EI64_V + VSOXSEG3EI8_V + VSOXSEG4EI16_V + VSOXSEG4EI32_V + VSOXSEG4EI64_V + VSOXSEG4EI8_V + VSOXSEG5EI16_V + VSOXSEG5EI32_V + VSOXSEG5EI64_V + VSOXSEG5EI8_V + VSOXSEG6EI16_V + VSOXSEG6EI32_V + VSOXSEG6EI64_V + VSOXSEG6EI8_V + VSOXSEG7EI16_V + VSOXSEG7EI32_V + VSOXSEG7EI64_V + VSOXSEG7EI8_V + VSOXSEG8EI16_V + VSOXSEG8EI32_V + VSOXSEG8EI64_V + VSOXSEG8EI8_V + VSRA_VI + VSRA_VV + VSRA_VX + VSRL_VI + VSRL_VV + VSRL_VX + VSSE16_V + VSSE32_V + VSSE64_V + VSSE8_V + VSSEG2E16_V + VSSEG2E32_V + VSSEG2E64_V + VSSEG2E8_V + VSSEG3E16_V + VSSEG3E32_V + VSSEG3E64_V + VSSEG3E8_V + VSSEG4E16_V + VSSEG4E32_V + VSSEG4E64_V + VSSEG4E8_V + VSSEG5E16_V + VSSEG5E32_V + VSSEG5E64_V + VSSEG5E8_V + VSSEG6E16_V + VSSEG6E32_V + VSSEG6E64_V + VSSEG6E8_V + VSSEG7E16_V + VSSEG7E32_V + VSSEG7E64_V + VSSEG7E8_V + VSSEG8E16_V + VSSEG8E32_V + VSSEG8E64_V + VSSEG8E8_V + VSSRA_VI + VSSRA_VV + VSSRA_VX + VSSRL_VI + VSSRL_VV + VSSRL_VX + VSSSEG2E16_V + VSSSEG2E32_V + VSSSEG2E64_V + VSSSEG2E8_V + VSSSEG3E16_V + VSSSEG3E32_V + VSSSEG3E64_V + VSSSEG3E8_V + VSSSEG4E16_V + VSSSEG4E32_V + VSSSEG4E64_V + VSSSEG4E8_V + VSSSEG5E16_V + VSSSEG5E32_V + VSSSEG5E64_V + VSSSEG5E8_V + VSSSEG6E16_V + VSSSEG6E32_V + VSSSEG6E64_V + VSSSEG6E8_V + VSSSEG7E16_V + VSSSEG7E32_V + VSSSEG7E64_V + VSSSEG7E8_V + VSSSEG8E16_V + VSSSEG8E32_V + VSSSEG8E64_V + VSSSEG8E8_V + VSSUBU_VV + VSSUBU_VX + VSSUB_VV + VSSUB_VX + VSUB_VV + VSUB_VX + VSUXEI16_V + VSUXEI32_V + VSUXEI64_V + VSUXEI8_V + VSUXSEG2EI16_V + VSUXSEG2EI32_V + VSUXSEG2EI64_V + VSUXSEG2EI8_V + VSUXSEG3EI16_V + VSUXSEG3EI32_V + VSUXSEG3EI64_V + VSUXSEG3EI8_V + VSUXSEG4EI16_V + VSUXSEG4EI32_V + VSUXSEG4EI64_V + VSUXSEG4EI8_V + VSUXSEG5EI16_V + VSUXSEG5EI32_V + VSUXSEG5EI64_V + VSUXSEG5EI8_V + VSUXSEG6EI16_V + VSUXSEG6EI32_V + VSUXSEG6EI64_V + VSUXSEG6EI8_V + VSUXSEG7EI16_V + VSUXSEG7EI32_V + VSUXSEG7EI64_V + VSUXSEG7EI8_V + VSUXSEG8EI16_V + VSUXSEG8EI32_V + VSUXSEG8EI64_V + VSUXSEG8EI8_V + VWADDU_VV + VWADDU_VX + VWADDU_WV + VWADDU_WX + VWADD_VV + VWADD_VX + VWADD_WV + VWADD_WX + VWMACCSU_VV + VWMACCSU_VX + VWMACCUS_VX + VWMACCU_VV + VWMACCU_VX + VWMACC_VV + VWMACC_VX + VWMULSU_VV + VWMULSU_VX + VWMULU_VV + VWMULU_VX + VWMUL_VV + VWMUL_VX + VWREDSUMU_VS + VWREDSUM_VS + VWSUBU_VV + VWSUBU_VX + VWSUBU_WV + VWSUBU_WX + VWSUB_VV + VWSUB_VX + VWSUB_WV + VWSUB_WX + VXOR_VI + VXOR_VV + VXOR_VX + VZEXT_VF2 + VZEXT_VF4 + VZEXT_VF8 XNOR XOR XORI @@ -376,370 +1005,999 @@ const ( ) var opstr = [...]string{ - ADD: "ADD", - ADDI: "ADDI", - ADDIW: "ADDIW", - ADDW: "ADDW", - ADD_UW: "ADD.UW", - AMOADD_D: "AMOADD.D", - AMOADD_D_AQ: "AMOADD.D.AQ", - AMOADD_D_AQRL: "AMOADD.D.AQRL", - AMOADD_D_RL: "AMOADD.D.RL", - AMOADD_W: "AMOADD.W", - AMOADD_W_AQ: "AMOADD.W.AQ", - AMOADD_W_AQRL: "AMOADD.W.AQRL", - AMOADD_W_RL: "AMOADD.W.RL", - AMOAND_D: "AMOAND.D", - AMOAND_D_AQ: "AMOAND.D.AQ", - AMOAND_D_AQRL: "AMOAND.D.AQRL", - AMOAND_D_RL: "AMOAND.D.RL", - AMOAND_W: "AMOAND.W", - AMOAND_W_AQ: "AMOAND.W.AQ", - AMOAND_W_AQRL: "AMOAND.W.AQRL", - AMOAND_W_RL: "AMOAND.W.RL", - AMOMAXU_D: "AMOMAXU.D", - AMOMAXU_D_AQ: "AMOMAXU.D.AQ", - AMOMAXU_D_AQRL: "AMOMAXU.D.AQRL", - AMOMAXU_D_RL: "AMOMAXU.D.RL", - 
AMOMAXU_W: "AMOMAXU.W", - AMOMAXU_W_AQ: "AMOMAXU.W.AQ", - AMOMAXU_W_AQRL: "AMOMAXU.W.AQRL", - AMOMAXU_W_RL: "AMOMAXU.W.RL", - AMOMAX_D: "AMOMAX.D", - AMOMAX_D_AQ: "AMOMAX.D.AQ", - AMOMAX_D_AQRL: "AMOMAX.D.AQRL", - AMOMAX_D_RL: "AMOMAX.D.RL", - AMOMAX_W: "AMOMAX.W", - AMOMAX_W_AQ: "AMOMAX.W.AQ", - AMOMAX_W_AQRL: "AMOMAX.W.AQRL", - AMOMAX_W_RL: "AMOMAX.W.RL", - AMOMINU_D: "AMOMINU.D", - AMOMINU_D_AQ: "AMOMINU.D.AQ", - AMOMINU_D_AQRL: "AMOMINU.D.AQRL", - AMOMINU_D_RL: "AMOMINU.D.RL", - AMOMINU_W: "AMOMINU.W", - AMOMINU_W_AQ: "AMOMINU.W.AQ", - AMOMINU_W_AQRL: "AMOMINU.W.AQRL", - AMOMINU_W_RL: "AMOMINU.W.RL", - AMOMIN_D: "AMOMIN.D", - AMOMIN_D_AQ: "AMOMIN.D.AQ", - AMOMIN_D_AQRL: "AMOMIN.D.AQRL", - AMOMIN_D_RL: "AMOMIN.D.RL", - AMOMIN_W: "AMOMIN.W", - AMOMIN_W_AQ: "AMOMIN.W.AQ", - AMOMIN_W_AQRL: "AMOMIN.W.AQRL", - AMOMIN_W_RL: "AMOMIN.W.RL", - AMOOR_D: "AMOOR.D", - AMOOR_D_AQ: "AMOOR.D.AQ", - AMOOR_D_AQRL: "AMOOR.D.AQRL", - AMOOR_D_RL: "AMOOR.D.RL", - AMOOR_W: "AMOOR.W", - AMOOR_W_AQ: "AMOOR.W.AQ", - AMOOR_W_AQRL: "AMOOR.W.AQRL", - AMOOR_W_RL: "AMOOR.W.RL", - AMOSWAP_D: "AMOSWAP.D", - AMOSWAP_D_AQ: "AMOSWAP.D.AQ", - AMOSWAP_D_AQRL: "AMOSWAP.D.AQRL", - AMOSWAP_D_RL: "AMOSWAP.D.RL", - AMOSWAP_W: "AMOSWAP.W", - AMOSWAP_W_AQ: "AMOSWAP.W.AQ", - AMOSWAP_W_AQRL: "AMOSWAP.W.AQRL", - AMOSWAP_W_RL: "AMOSWAP.W.RL", - AMOXOR_D: "AMOXOR.D", - AMOXOR_D_AQ: "AMOXOR.D.AQ", - AMOXOR_D_AQRL: "AMOXOR.D.AQRL", - AMOXOR_D_RL: "AMOXOR.D.RL", - AMOXOR_W: "AMOXOR.W", - AMOXOR_W_AQ: "AMOXOR.W.AQ", - AMOXOR_W_AQRL: "AMOXOR.W.AQRL", - AMOXOR_W_RL: "AMOXOR.W.RL", - AND: "AND", - ANDI: "ANDI", - ANDN: "ANDN", - AUIPC: "AUIPC", - BCLR: "BCLR", - BCLRI: "BCLRI", - BEQ: "BEQ", - BEXT: "BEXT", - BEXTI: "BEXTI", - BGE: "BGE", - BGEU: "BGEU", - BINV: "BINV", - BINVI: "BINVI", - BLT: "BLT", - BLTU: "BLTU", - BNE: "BNE", - BSET: "BSET", - BSETI: "BSETI", - CLZ: "CLZ", - CLZW: "CLZW", - CPOP: "CPOP", - CPOPW: "CPOPW", - CSRRC: "CSRRC", - CSRRCI: "CSRRCI", - CSRRS: "CSRRS", - CSRRSI: "CSRRSI", - CSRRW: "CSRRW", - CSRRWI: "CSRRWI", - CTZ: "CTZ", - CTZW: "CTZW", - C_ADD: "C.ADD", - C_ADDI: "C.ADDI", - C_ADDI16SP: "C.ADDI16SP", - C_ADDI4SPN: "C.ADDI4SPN", - C_ADDIW: "C.ADDIW", - C_ADDW: "C.ADDW", - C_AND: "C.AND", - C_ANDI: "C.ANDI", - C_BEQZ: "C.BEQZ", - C_BNEZ: "C.BNEZ", - C_EBREAK: "C.EBREAK", - C_FLD: "C.FLD", - C_FLDSP: "C.FLDSP", - C_FSD: "C.FSD", - C_FSDSP: "C.FSDSP", - C_J: "C.J", - C_JALR: "C.JALR", - C_JR: "C.JR", - C_LD: "C.LD", - C_LDSP: "C.LDSP", - C_LI: "C.LI", - C_LUI: "C.LUI", - C_LW: "C.LW", - C_LWSP: "C.LWSP", - C_MV: "C.MV", - C_NOP: "C.NOP", - C_OR: "C.OR", - C_SD: "C.SD", - C_SDSP: "C.SDSP", - C_SLLI: "C.SLLI", - C_SRAI: "C.SRAI", - C_SRLI: "C.SRLI", - C_SUB: "C.SUB", - C_SUBW: "C.SUBW", - C_SW: "C.SW", - C_SWSP: "C.SWSP", - C_UNIMP: "C.UNIMP", - C_XOR: "C.XOR", - DIV: "DIV", - DIVU: "DIVU", - DIVUW: "DIVUW", - DIVW: "DIVW", - EBREAK: "EBREAK", - ECALL: "ECALL", - FADD_D: "FADD.D", - FADD_H: "FADD.H", - FADD_Q: "FADD.Q", - FADD_S: "FADD.S", - FCLASS_D: "FCLASS.D", - FCLASS_H: "FCLASS.H", - FCLASS_Q: "FCLASS.Q", - FCLASS_S: "FCLASS.S", - FCVT_D_L: "FCVT.D.L", - FCVT_D_LU: "FCVT.D.LU", - FCVT_D_Q: "FCVT.D.Q", - FCVT_D_S: "FCVT.D.S", - FCVT_D_W: "FCVT.D.W", - FCVT_D_WU: "FCVT.D.WU", - FCVT_H_L: "FCVT.H.L", - FCVT_H_LU: "FCVT.H.LU", - FCVT_H_S: "FCVT.H.S", - FCVT_H_W: "FCVT.H.W", - FCVT_H_WU: "FCVT.H.WU", - FCVT_LU_D: "FCVT.LU.D", - FCVT_LU_H: "FCVT.LU.H", - FCVT_LU_Q: "FCVT.LU.Q", - FCVT_LU_S: "FCVT.LU.S", - FCVT_L_D: "FCVT.L.D", - FCVT_L_H: "FCVT.L.H", - FCVT_L_Q: "FCVT.L.Q", - FCVT_L_S: "FCVT.L.S", - FCVT_Q_D: 
"FCVT.Q.D", - FCVT_Q_L: "FCVT.Q.L", - FCVT_Q_LU: "FCVT.Q.LU", - FCVT_Q_S: "FCVT.Q.S", - FCVT_Q_W: "FCVT.Q.W", - FCVT_Q_WU: "FCVT.Q.WU", - FCVT_S_D: "FCVT.S.D", - FCVT_S_H: "FCVT.S.H", - FCVT_S_L: "FCVT.S.L", - FCVT_S_LU: "FCVT.S.LU", - FCVT_S_Q: "FCVT.S.Q", - FCVT_S_W: "FCVT.S.W", - FCVT_S_WU: "FCVT.S.WU", - FCVT_WU_D: "FCVT.WU.D", - FCVT_WU_H: "FCVT.WU.H", - FCVT_WU_Q: "FCVT.WU.Q", - FCVT_WU_S: "FCVT.WU.S", - FCVT_W_D: "FCVT.W.D", - FCVT_W_H: "FCVT.W.H", - FCVT_W_Q: "FCVT.W.Q", - FCVT_W_S: "FCVT.W.S", - FDIV_D: "FDIV.D", - FDIV_H: "FDIV.H", - FDIV_Q: "FDIV.Q", - FDIV_S: "FDIV.S", - FENCE: "FENCE", - FENCE_I: "FENCE.I", - FEQ_D: "FEQ.D", - FEQ_H: "FEQ.H", - FEQ_Q: "FEQ.Q", - FEQ_S: "FEQ.S", - FLD: "FLD", - FLE_D: "FLE.D", - FLE_H: "FLE.H", - FLE_Q: "FLE.Q", - FLE_S: "FLE.S", - FLH: "FLH", - FLQ: "FLQ", - FLT_D: "FLT.D", - FLT_H: "FLT.H", - FLT_Q: "FLT.Q", - FLT_S: "FLT.S", - FLW: "FLW", - FMADD_D: "FMADD.D", - FMADD_H: "FMADD.H", - FMADD_Q: "FMADD.Q", - FMADD_S: "FMADD.S", - FMAX_D: "FMAX.D", - FMAX_H: "FMAX.H", - FMAX_Q: "FMAX.Q", - FMAX_S: "FMAX.S", - FMIN_D: "FMIN.D", - FMIN_H: "FMIN.H", - FMIN_Q: "FMIN.Q", - FMIN_S: "FMIN.S", - FMSUB_D: "FMSUB.D", - FMSUB_H: "FMSUB.H", - FMSUB_Q: "FMSUB.Q", - FMSUB_S: "FMSUB.S", - FMUL_D: "FMUL.D", - FMUL_H: "FMUL.H", - FMUL_Q: "FMUL.Q", - FMUL_S: "FMUL.S", - FMV_D_X: "FMV.D.X", - FMV_H_X: "FMV.H.X", - FMV_W_X: "FMV.W.X", - FMV_X_D: "FMV.X.D", - FMV_X_H: "FMV.X.H", - FMV_X_W: "FMV.X.W", - FNMADD_D: "FNMADD.D", - FNMADD_H: "FNMADD.H", - FNMADD_Q: "FNMADD.Q", - FNMADD_S: "FNMADD.S", - FNMSUB_D: "FNMSUB.D", - FNMSUB_H: "FNMSUB.H", - FNMSUB_Q: "FNMSUB.Q", - FNMSUB_S: "FNMSUB.S", - FSD: "FSD", - FSGNJN_D: "FSGNJN.D", - FSGNJN_H: "FSGNJN.H", - FSGNJN_Q: "FSGNJN.Q", - FSGNJN_S: "FSGNJN.S", - FSGNJX_D: "FSGNJX.D", - FSGNJX_H: "FSGNJX.H", - FSGNJX_Q: "FSGNJX.Q", - FSGNJX_S: "FSGNJX.S", - FSGNJ_D: "FSGNJ.D", - FSGNJ_H: "FSGNJ.H", - FSGNJ_Q: "FSGNJ.Q", - FSGNJ_S: "FSGNJ.S", - FSH: "FSH", - FSQ: "FSQ", - FSQRT_D: "FSQRT.D", - FSQRT_H: "FSQRT.H", - FSQRT_Q: "FSQRT.Q", - FSQRT_S: "FSQRT.S", - FSUB_D: "FSUB.D", - FSUB_H: "FSUB.H", - FSUB_Q: "FSUB.Q", - FSUB_S: "FSUB.S", - FSW: "FSW", - JAL: "JAL", - JALR: "JALR", - LB: "LB", - LBU: "LBU", - LD: "LD", - LH: "LH", - LHU: "LHU", - LR_D: "LR.D", - LR_D_AQ: "LR.D.AQ", - LR_D_AQRL: "LR.D.AQRL", - LR_D_RL: "LR.D.RL", - LR_W: "LR.W", - LR_W_AQ: "LR.W.AQ", - LR_W_AQRL: "LR.W.AQRL", - LR_W_RL: "LR.W.RL", - LUI: "LUI", - LW: "LW", - LWU: "LWU", - MAX: "MAX", - MAXU: "MAXU", - MIN: "MIN", - MINU: "MINU", - MUL: "MUL", - MULH: "MULH", - MULHSU: "MULHSU", - MULHU: "MULHU", - MULW: "MULW", - OR: "OR", - ORC_B: "ORC.B", - ORI: "ORI", - ORN: "ORN", - REM: "REM", - REMU: "REMU", - REMUW: "REMUW", - REMW: "REMW", - REV8: "REV8", - ROL: "ROL", - ROLW: "ROLW", - ROR: "ROR", - RORI: "RORI", - RORIW: "RORIW", - RORW: "RORW", - SB: "SB", - SC_D: "SC.D", - SC_D_AQ: "SC.D.AQ", - SC_D_AQRL: "SC.D.AQRL", - SC_D_RL: "SC.D.RL", - SC_W: "SC.W", - SC_W_AQ: "SC.W.AQ", - SC_W_AQRL: "SC.W.AQRL", - SC_W_RL: "SC.W.RL", - SD: "SD", - SEXT_B: "SEXT.B", - SEXT_H: "SEXT.H", - SH: "SH", - SH1ADD: "SH1ADD", - SH1ADD_UW: "SH1ADD.UW", - SH2ADD: "SH2ADD", - SH2ADD_UW: "SH2ADD.UW", - SH3ADD: "SH3ADD", - SH3ADD_UW: "SH3ADD.UW", - SLL: "SLL", - SLLI: "SLLI", - SLLIW: "SLLIW", - SLLI_UW: "SLLI.UW", - SLLW: "SLLW", - SLT: "SLT", - SLTI: "SLTI", - SLTIU: "SLTIU", - SLTU: "SLTU", - SRA: "SRA", - SRAI: "SRAI", - SRAIW: "SRAIW", - SRAW: "SRAW", - SRL: "SRL", - SRLI: "SRLI", - SRLIW: "SRLIW", - SRLW: "SRLW", - SUB: "SUB", - SUBW: "SUBW", - SW: "SW", - XNOR: "XNOR", - XOR: 
"XOR", - XORI: "XORI", - ZEXT_H: "ZEXT.H", + ADD: "ADD", + ADDI: "ADDI", + ADDIW: "ADDIW", + ADDW: "ADDW", + ADD_UW: "ADD.UW", + AMOADD_D: "AMOADD.D", + AMOADD_D_AQ: "AMOADD.D.AQ", + AMOADD_D_AQRL: "AMOADD.D.AQRL", + AMOADD_D_RL: "AMOADD.D.RL", + AMOADD_W: "AMOADD.W", + AMOADD_W_AQ: "AMOADD.W.AQ", + AMOADD_W_AQRL: "AMOADD.W.AQRL", + AMOADD_W_RL: "AMOADD.W.RL", + AMOAND_D: "AMOAND.D", + AMOAND_D_AQ: "AMOAND.D.AQ", + AMOAND_D_AQRL: "AMOAND.D.AQRL", + AMOAND_D_RL: "AMOAND.D.RL", + AMOAND_W: "AMOAND.W", + AMOAND_W_AQ: "AMOAND.W.AQ", + AMOAND_W_AQRL: "AMOAND.W.AQRL", + AMOAND_W_RL: "AMOAND.W.RL", + AMOMAXU_D: "AMOMAXU.D", + AMOMAXU_D_AQ: "AMOMAXU.D.AQ", + AMOMAXU_D_AQRL: "AMOMAXU.D.AQRL", + AMOMAXU_D_RL: "AMOMAXU.D.RL", + AMOMAXU_W: "AMOMAXU.W", + AMOMAXU_W_AQ: "AMOMAXU.W.AQ", + AMOMAXU_W_AQRL: "AMOMAXU.W.AQRL", + AMOMAXU_W_RL: "AMOMAXU.W.RL", + AMOMAX_D: "AMOMAX.D", + AMOMAX_D_AQ: "AMOMAX.D.AQ", + AMOMAX_D_AQRL: "AMOMAX.D.AQRL", + AMOMAX_D_RL: "AMOMAX.D.RL", + AMOMAX_W: "AMOMAX.W", + AMOMAX_W_AQ: "AMOMAX.W.AQ", + AMOMAX_W_AQRL: "AMOMAX.W.AQRL", + AMOMAX_W_RL: "AMOMAX.W.RL", + AMOMINU_D: "AMOMINU.D", + AMOMINU_D_AQ: "AMOMINU.D.AQ", + AMOMINU_D_AQRL: "AMOMINU.D.AQRL", + AMOMINU_D_RL: "AMOMINU.D.RL", + AMOMINU_W: "AMOMINU.W", + AMOMINU_W_AQ: "AMOMINU.W.AQ", + AMOMINU_W_AQRL: "AMOMINU.W.AQRL", + AMOMINU_W_RL: "AMOMINU.W.RL", + AMOMIN_D: "AMOMIN.D", + AMOMIN_D_AQ: "AMOMIN.D.AQ", + AMOMIN_D_AQRL: "AMOMIN.D.AQRL", + AMOMIN_D_RL: "AMOMIN.D.RL", + AMOMIN_W: "AMOMIN.W", + AMOMIN_W_AQ: "AMOMIN.W.AQ", + AMOMIN_W_AQRL: "AMOMIN.W.AQRL", + AMOMIN_W_RL: "AMOMIN.W.RL", + AMOOR_D: "AMOOR.D", + AMOOR_D_AQ: "AMOOR.D.AQ", + AMOOR_D_AQRL: "AMOOR.D.AQRL", + AMOOR_D_RL: "AMOOR.D.RL", + AMOOR_W: "AMOOR.W", + AMOOR_W_AQ: "AMOOR.W.AQ", + AMOOR_W_AQRL: "AMOOR.W.AQRL", + AMOOR_W_RL: "AMOOR.W.RL", + AMOSWAP_D: "AMOSWAP.D", + AMOSWAP_D_AQ: "AMOSWAP.D.AQ", + AMOSWAP_D_AQRL: "AMOSWAP.D.AQRL", + AMOSWAP_D_RL: "AMOSWAP.D.RL", + AMOSWAP_W: "AMOSWAP.W", + AMOSWAP_W_AQ: "AMOSWAP.W.AQ", + AMOSWAP_W_AQRL: "AMOSWAP.W.AQRL", + AMOSWAP_W_RL: "AMOSWAP.W.RL", + AMOXOR_D: "AMOXOR.D", + AMOXOR_D_AQ: "AMOXOR.D.AQ", + AMOXOR_D_AQRL: "AMOXOR.D.AQRL", + AMOXOR_D_RL: "AMOXOR.D.RL", + AMOXOR_W: "AMOXOR.W", + AMOXOR_W_AQ: "AMOXOR.W.AQ", + AMOXOR_W_AQRL: "AMOXOR.W.AQRL", + AMOXOR_W_RL: "AMOXOR.W.RL", + AND: "AND", + ANDI: "ANDI", + ANDN: "ANDN", + AUIPC: "AUIPC", + BCLR: "BCLR", + BCLRI: "BCLRI", + BEQ: "BEQ", + BEXT: "BEXT", + BEXTI: "BEXTI", + BGE: "BGE", + BGEU: "BGEU", + BINV: "BINV", + BINVI: "BINVI", + BLT: "BLT", + BLTU: "BLTU", + BNE: "BNE", + BSET: "BSET", + BSETI: "BSETI", + CLZ: "CLZ", + CLZW: "CLZW", + CPOP: "CPOP", + CPOPW: "CPOPW", + CSRRC: "CSRRC", + CSRRCI: "CSRRCI", + CSRRS: "CSRRS", + CSRRSI: "CSRRSI", + CSRRW: "CSRRW", + CSRRWI: "CSRRWI", + CTZ: "CTZ", + CTZW: "CTZW", + CZERO_EQZ: "CZERO.EQZ", + CZERO_NEZ: "CZERO.NEZ", + C_ADD: "C.ADD", + C_ADDI: "C.ADDI", + C_ADDI16SP: "C.ADDI16SP", + C_ADDI4SPN: "C.ADDI4SPN", + C_ADDIW: "C.ADDIW", + C_ADDW: "C.ADDW", + C_AND: "C.AND", + C_ANDI: "C.ANDI", + C_BEQZ: "C.BEQZ", + C_BNEZ: "C.BNEZ", + C_EBREAK: "C.EBREAK", + C_FLD: "C.FLD", + C_FLDSP: "C.FLDSP", + C_FSD: "C.FSD", + C_FSDSP: "C.FSDSP", + C_J: "C.J", + C_JALR: "C.JALR", + C_JR: "C.JR", + C_LD: "C.LD", + C_LDSP: "C.LDSP", + C_LI: "C.LI", + C_LUI: "C.LUI", + C_LW: "C.LW", + C_LWSP: "C.LWSP", + C_MV: "C.MV", + C_NOP: "C.NOP", + C_OR: "C.OR", + C_SD: "C.SD", + C_SDSP: "C.SDSP", + C_SLLI: "C.SLLI", + C_SRAI: "C.SRAI", + C_SRLI: "C.SRLI", + C_SUB: "C.SUB", + C_SUBW: "C.SUBW", + C_SW: "C.SW", + C_SWSP: "C.SWSP", + C_UNIMP: "C.UNIMP", + 
C_XOR: "C.XOR", + DIV: "DIV", + DIVU: "DIVU", + DIVUW: "DIVUW", + DIVW: "DIVW", + EBREAK: "EBREAK", + ECALL: "ECALL", + FADD_D: "FADD.D", + FADD_H: "FADD.H", + FADD_Q: "FADD.Q", + FADD_S: "FADD.S", + FCLASS_D: "FCLASS.D", + FCLASS_H: "FCLASS.H", + FCLASS_Q: "FCLASS.Q", + FCLASS_S: "FCLASS.S", + FCVT_D_L: "FCVT.D.L", + FCVT_D_LU: "FCVT.D.LU", + FCVT_D_Q: "FCVT.D.Q", + FCVT_D_S: "FCVT.D.S", + FCVT_D_W: "FCVT.D.W", + FCVT_D_WU: "FCVT.D.WU", + FCVT_H_L: "FCVT.H.L", + FCVT_H_LU: "FCVT.H.LU", + FCVT_H_S: "FCVT.H.S", + FCVT_H_W: "FCVT.H.W", + FCVT_H_WU: "FCVT.H.WU", + FCVT_LU_D: "FCVT.LU.D", + FCVT_LU_H: "FCVT.LU.H", + FCVT_LU_Q: "FCVT.LU.Q", + FCVT_LU_S: "FCVT.LU.S", + FCVT_L_D: "FCVT.L.D", + FCVT_L_H: "FCVT.L.H", + FCVT_L_Q: "FCVT.L.Q", + FCVT_L_S: "FCVT.L.S", + FCVT_Q_D: "FCVT.Q.D", + FCVT_Q_L: "FCVT.Q.L", + FCVT_Q_LU: "FCVT.Q.LU", + FCVT_Q_S: "FCVT.Q.S", + FCVT_Q_W: "FCVT.Q.W", + FCVT_Q_WU: "FCVT.Q.WU", + FCVT_S_D: "FCVT.S.D", + FCVT_S_H: "FCVT.S.H", + FCVT_S_L: "FCVT.S.L", + FCVT_S_LU: "FCVT.S.LU", + FCVT_S_Q: "FCVT.S.Q", + FCVT_S_W: "FCVT.S.W", + FCVT_S_WU: "FCVT.S.WU", + FCVT_WU_D: "FCVT.WU.D", + FCVT_WU_H: "FCVT.WU.H", + FCVT_WU_Q: "FCVT.WU.Q", + FCVT_WU_S: "FCVT.WU.S", + FCVT_W_D: "FCVT.W.D", + FCVT_W_H: "FCVT.W.H", + FCVT_W_Q: "FCVT.W.Q", + FCVT_W_S: "FCVT.W.S", + FDIV_D: "FDIV.D", + FDIV_H: "FDIV.H", + FDIV_Q: "FDIV.Q", + FDIV_S: "FDIV.S", + FENCE: "FENCE", + FENCE_I: "FENCE.I", + FEQ_D: "FEQ.D", + FEQ_H: "FEQ.H", + FEQ_Q: "FEQ.Q", + FEQ_S: "FEQ.S", + FLD: "FLD", + FLE_D: "FLE.D", + FLE_H: "FLE.H", + FLE_Q: "FLE.Q", + FLE_S: "FLE.S", + FLH: "FLH", + FLQ: "FLQ", + FLT_D: "FLT.D", + FLT_H: "FLT.H", + FLT_Q: "FLT.Q", + FLT_S: "FLT.S", + FLW: "FLW", + FMADD_D: "FMADD.D", + FMADD_H: "FMADD.H", + FMADD_Q: "FMADD.Q", + FMADD_S: "FMADD.S", + FMAX_D: "FMAX.D", + FMAX_H: "FMAX.H", + FMAX_Q: "FMAX.Q", + FMAX_S: "FMAX.S", + FMIN_D: "FMIN.D", + FMIN_H: "FMIN.H", + FMIN_Q: "FMIN.Q", + FMIN_S: "FMIN.S", + FMSUB_D: "FMSUB.D", + FMSUB_H: "FMSUB.H", + FMSUB_Q: "FMSUB.Q", + FMSUB_S: "FMSUB.S", + FMUL_D: "FMUL.D", + FMUL_H: "FMUL.H", + FMUL_Q: "FMUL.Q", + FMUL_S: "FMUL.S", + FMV_D_X: "FMV.D.X", + FMV_H_X: "FMV.H.X", + FMV_W_X: "FMV.W.X", + FMV_X_D: "FMV.X.D", + FMV_X_H: "FMV.X.H", + FMV_X_W: "FMV.X.W", + FNMADD_D: "FNMADD.D", + FNMADD_H: "FNMADD.H", + FNMADD_Q: "FNMADD.Q", + FNMADD_S: "FNMADD.S", + FNMSUB_D: "FNMSUB.D", + FNMSUB_H: "FNMSUB.H", + FNMSUB_Q: "FNMSUB.Q", + FNMSUB_S: "FNMSUB.S", + FSD: "FSD", + FSGNJN_D: "FSGNJN.D", + FSGNJN_H: "FSGNJN.H", + FSGNJN_Q: "FSGNJN.Q", + FSGNJN_S: "FSGNJN.S", + FSGNJX_D: "FSGNJX.D", + FSGNJX_H: "FSGNJX.H", + FSGNJX_Q: "FSGNJX.Q", + FSGNJX_S: "FSGNJX.S", + FSGNJ_D: "FSGNJ.D", + FSGNJ_H: "FSGNJ.H", + FSGNJ_Q: "FSGNJ.Q", + FSGNJ_S: "FSGNJ.S", + FSH: "FSH", + FSQ: "FSQ", + FSQRT_D: "FSQRT.D", + FSQRT_H: "FSQRT.H", + FSQRT_Q: "FSQRT.Q", + FSQRT_S: "FSQRT.S", + FSUB_D: "FSUB.D", + FSUB_H: "FSUB.H", + FSUB_Q: "FSUB.Q", + FSUB_S: "FSUB.S", + FSW: "FSW", + JAL: "JAL", + JALR: "JALR", + LB: "LB", + LBU: "LBU", + LD: "LD", + LH: "LH", + LHU: "LHU", + LR_D: "LR.D", + LR_D_AQ: "LR.D.AQ", + LR_D_AQRL: "LR.D.AQRL", + LR_D_RL: "LR.D.RL", + LR_W: "LR.W", + LR_W_AQ: "LR.W.AQ", + LR_W_AQRL: "LR.W.AQRL", + LR_W_RL: "LR.W.RL", + LUI: "LUI", + LW: "LW", + LWU: "LWU", + MAX: "MAX", + MAXU: "MAXU", + MIN: "MIN", + MINU: "MINU", + MUL: "MUL", + MULH: "MULH", + MULHSU: "MULHSU", + MULHU: "MULHU", + MULW: "MULW", + OR: "OR", + ORC_B: "ORC.B", + ORI: "ORI", + ORN: "ORN", + REM: "REM", + REMU: "REMU", + REMUW: "REMUW", + REMW: "REMW", + REV8: "REV8", + ROL: "ROL", + ROLW: "ROLW", + ROR: "ROR", 
+ RORI: "RORI", + RORIW: "RORIW", + RORW: "RORW", + SB: "SB", + SC_D: "SC.D", + SC_D_AQ: "SC.D.AQ", + SC_D_AQRL: "SC.D.AQRL", + SC_D_RL: "SC.D.RL", + SC_W: "SC.W", + SC_W_AQ: "SC.W.AQ", + SC_W_AQRL: "SC.W.AQRL", + SC_W_RL: "SC.W.RL", + SD: "SD", + SEXT_B: "SEXT.B", + SEXT_H: "SEXT.H", + SH: "SH", + SH1ADD: "SH1ADD", + SH1ADD_UW: "SH1ADD.UW", + SH2ADD: "SH2ADD", + SH2ADD_UW: "SH2ADD.UW", + SH3ADD: "SH3ADD", + SH3ADD_UW: "SH3ADD.UW", + SLL: "SLL", + SLLI: "SLLI", + SLLIW: "SLLIW", + SLLI_UW: "SLLI.UW", + SLLW: "SLLW", + SLT: "SLT", + SLTI: "SLTI", + SLTIU: "SLTIU", + SLTU: "SLTU", + SRA: "SRA", + SRAI: "SRAI", + SRAIW: "SRAIW", + SRAW: "SRAW", + SRL: "SRL", + SRLI: "SRLI", + SRLIW: "SRLIW", + SRLW: "SRLW", + SUB: "SUB", + SUBW: "SUBW", + SW: "SW", + VAADDU_VV: "VAADDU.VV", + VAADDU_VX: "VAADDU.VX", + VAADD_VV: "VAADD.VV", + VAADD_VX: "VAADD.VX", + VADC_VIM: "VADC.VIM", + VADC_VVM: "VADC.VVM", + VADC_VXM: "VADC.VXM", + VADD_VI: "VADD.VI", + VADD_VV: "VADD.VV", + VADD_VX: "VADD.VX", + VAND_VI: "VAND.VI", + VAND_VV: "VAND.VV", + VAND_VX: "VAND.VX", + VASUBU_VV: "VASUBU.VV", + VASUBU_VX: "VASUBU.VX", + VASUB_VV: "VASUB.VV", + VASUB_VX: "VASUB.VX", + VCOMPRESS_VM: "VCOMPRESS.VM", + VCPOP_M: "VCPOP.M", + VDIVU_VV: "VDIVU.VV", + VDIVU_VX: "VDIVU.VX", + VDIV_VV: "VDIV.VV", + VDIV_VX: "VDIV.VX", + VFADD_VF: "VFADD.VF", + VFADD_VV: "VFADD.VV", + VFCLASS_V: "VFCLASS.V", + VFCVT_F_XU_V: "VFCVT.F.XU.V", + VFCVT_F_X_V: "VFCVT.F.X.V", + VFCVT_RTZ_XU_F_V: "VFCVT.RTZ.XU.F.V", + VFCVT_RTZ_X_F_V: "VFCVT.RTZ.X.F.V", + VFCVT_XU_F_V: "VFCVT.XU.F.V", + VFCVT_X_F_V: "VFCVT.X.F.V", + VFDIV_VF: "VFDIV.VF", + VFDIV_VV: "VFDIV.VV", + VFIRST_M: "VFIRST.M", + VFMACC_VF: "VFMACC.VF", + VFMACC_VV: "VFMACC.VV", + VFMADD_VF: "VFMADD.VF", + VFMADD_VV: "VFMADD.VV", + VFMAX_VF: "VFMAX.VF", + VFMAX_VV: "VFMAX.VV", + VFMERGE_VFM: "VFMERGE.VFM", + VFMIN_VF: "VFMIN.VF", + VFMIN_VV: "VFMIN.VV", + VFMSAC_VF: "VFMSAC.VF", + VFMSAC_VV: "VFMSAC.VV", + VFMSUB_VF: "VFMSUB.VF", + VFMSUB_VV: "VFMSUB.VV", + VFMUL_VF: "VFMUL.VF", + VFMUL_VV: "VFMUL.VV", + VFMV_F_S: "VFMV.F.S", + VFMV_S_F: "VFMV.S.F", + VFMV_V_F: "VFMV.V.F", + VFNCVT_F_F_W: "VFNCVT.F.F.W", + VFNCVT_F_XU_W: "VFNCVT.F.XU.W", + VFNCVT_F_X_W: "VFNCVT.F.X.W", + VFNCVT_ROD_F_F_W: "VFNCVT.ROD.F.F.W", + VFNCVT_RTZ_XU_F_W: "VFNCVT.RTZ.XU.F.W", + VFNCVT_RTZ_X_F_W: "VFNCVT.RTZ.X.F.W", + VFNCVT_XU_F_W: "VFNCVT.XU.F.W", + VFNCVT_X_F_W: "VFNCVT.X.F.W", + VFNMACC_VF: "VFNMACC.VF", + VFNMACC_VV: "VFNMACC.VV", + VFNMADD_VF: "VFNMADD.VF", + VFNMADD_VV: "VFNMADD.VV", + VFNMSAC_VF: "VFNMSAC.VF", + VFNMSAC_VV: "VFNMSAC.VV", + VFNMSUB_VF: "VFNMSUB.VF", + VFNMSUB_VV: "VFNMSUB.VV", + VFRDIV_VF: "VFRDIV.VF", + VFREC7_V: "VFREC7.V", + VFREDMAX_VS: "VFREDMAX.VS", + VFREDMIN_VS: "VFREDMIN.VS", + VFREDOSUM_VS: "VFREDOSUM.VS", + VFREDUSUM_VS: "VFREDUSUM.VS", + VFRSQRT7_V: "VFRSQRT7.V", + VFRSUB_VF: "VFRSUB.VF", + VFSGNJN_VF: "VFSGNJN.VF", + VFSGNJN_VV: "VFSGNJN.VV", + VFSGNJX_VF: "VFSGNJX.VF", + VFSGNJX_VV: "VFSGNJX.VV", + VFSGNJ_VF: "VFSGNJ.VF", + VFSGNJ_VV: "VFSGNJ.VV", + VFSLIDE1DOWN_VF: "VFSLIDE1DOWN.VF", + VFSLIDE1UP_VF: "VFSLIDE1UP.VF", + VFSQRT_V: "VFSQRT.V", + VFSUB_VF: "VFSUB.VF", + VFSUB_VV: "VFSUB.VV", + VFWADD_VF: "VFWADD.VF", + VFWADD_VV: "VFWADD.VV", + VFWADD_WF: "VFWADD.WF", + VFWADD_WV: "VFWADD.WV", + VFWCVT_F_F_V: "VFWCVT.F.F.V", + VFWCVT_F_XU_V: "VFWCVT.F.XU.V", + VFWCVT_F_X_V: "VFWCVT.F.X.V", + VFWCVT_RTZ_XU_F_V: "VFWCVT.RTZ.XU.F.V", + VFWCVT_RTZ_X_F_V: "VFWCVT.RTZ.X.F.V", + VFWCVT_XU_F_V: "VFWCVT.XU.F.V", + VFWCVT_X_F_V: "VFWCVT.X.F.V", + VFWMACC_VF: "VFWMACC.VF", + VFWMACC_VV: 
"VFWMACC.VV", + VFWMSAC_VF: "VFWMSAC.VF", + VFWMSAC_VV: "VFWMSAC.VV", + VFWMUL_VF: "VFWMUL.VF", + VFWMUL_VV: "VFWMUL.VV", + VFWNMACC_VF: "VFWNMACC.VF", + VFWNMACC_VV: "VFWNMACC.VV", + VFWNMSAC_VF: "VFWNMSAC.VF", + VFWNMSAC_VV: "VFWNMSAC.VV", + VFWREDOSUM_VS: "VFWREDOSUM.VS", + VFWREDUSUM_VS: "VFWREDUSUM.VS", + VFWSUB_VF: "VFWSUB.VF", + VFWSUB_VV: "VFWSUB.VV", + VFWSUB_WF: "VFWSUB.WF", + VFWSUB_WV: "VFWSUB.WV", + VID_V: "VID.V", + VIOTA_M: "VIOTA.M", + VL1RE16_V: "VL1RE16.V", + VL1RE32_V: "VL1RE32.V", + VL1RE64_V: "VL1RE64.V", + VL1RE8_V: "VL1RE8.V", + VL2RE16_V: "VL2RE16.V", + VL2RE32_V: "VL2RE32.V", + VL2RE64_V: "VL2RE64.V", + VL2RE8_V: "VL2RE8.V", + VL4RE16_V: "VL4RE16.V", + VL4RE32_V: "VL4RE32.V", + VL4RE64_V: "VL4RE64.V", + VL4RE8_V: "VL4RE8.V", + VL8RE16_V: "VL8RE16.V", + VL8RE32_V: "VL8RE32.V", + VL8RE64_V: "VL8RE64.V", + VL8RE8_V: "VL8RE8.V", + VLE16FF_V: "VLE16FF.V", + VLE16_V: "VLE16.V", + VLE32FF_V: "VLE32FF.V", + VLE32_V: "VLE32.V", + VLE64FF_V: "VLE64FF.V", + VLE64_V: "VLE64.V", + VLE8FF_V: "VLE8FF.V", + VLE8_V: "VLE8.V", + VLM_V: "VLM.V", + VLOXEI16_V: "VLOXEI16.V", + VLOXEI32_V: "VLOXEI32.V", + VLOXEI64_V: "VLOXEI64.V", + VLOXEI8_V: "VLOXEI8.V", + VLOXSEG2EI16_V: "VLOXSEG2EI16.V", + VLOXSEG2EI32_V: "VLOXSEG2EI32.V", + VLOXSEG2EI64_V: "VLOXSEG2EI64.V", + VLOXSEG2EI8_V: "VLOXSEG2EI8.V", + VLOXSEG3EI16_V: "VLOXSEG3EI16.V", + VLOXSEG3EI32_V: "VLOXSEG3EI32.V", + VLOXSEG3EI64_V: "VLOXSEG3EI64.V", + VLOXSEG3EI8_V: "VLOXSEG3EI8.V", + VLOXSEG4EI16_V: "VLOXSEG4EI16.V", + VLOXSEG4EI32_V: "VLOXSEG4EI32.V", + VLOXSEG4EI64_V: "VLOXSEG4EI64.V", + VLOXSEG4EI8_V: "VLOXSEG4EI8.V", + VLOXSEG5EI16_V: "VLOXSEG5EI16.V", + VLOXSEG5EI32_V: "VLOXSEG5EI32.V", + VLOXSEG5EI64_V: "VLOXSEG5EI64.V", + VLOXSEG5EI8_V: "VLOXSEG5EI8.V", + VLOXSEG6EI16_V: "VLOXSEG6EI16.V", + VLOXSEG6EI32_V: "VLOXSEG6EI32.V", + VLOXSEG6EI64_V: "VLOXSEG6EI64.V", + VLOXSEG6EI8_V: "VLOXSEG6EI8.V", + VLOXSEG7EI16_V: "VLOXSEG7EI16.V", + VLOXSEG7EI32_V: "VLOXSEG7EI32.V", + VLOXSEG7EI64_V: "VLOXSEG7EI64.V", + VLOXSEG7EI8_V: "VLOXSEG7EI8.V", + VLOXSEG8EI16_V: "VLOXSEG8EI16.V", + VLOXSEG8EI32_V: "VLOXSEG8EI32.V", + VLOXSEG8EI64_V: "VLOXSEG8EI64.V", + VLOXSEG8EI8_V: "VLOXSEG8EI8.V", + VLSE16_V: "VLSE16.V", + VLSE32_V: "VLSE32.V", + VLSE64_V: "VLSE64.V", + VLSE8_V: "VLSE8.V", + VLSEG2E16FF_V: "VLSEG2E16FF.V", + VLSEG2E16_V: "VLSEG2E16.V", + VLSEG2E32FF_V: "VLSEG2E32FF.V", + VLSEG2E32_V: "VLSEG2E32.V", + VLSEG2E64FF_V: "VLSEG2E64FF.V", + VLSEG2E64_V: "VLSEG2E64.V", + VLSEG2E8FF_V: "VLSEG2E8FF.V", + VLSEG2E8_V: "VLSEG2E8.V", + VLSEG3E16FF_V: "VLSEG3E16FF.V", + VLSEG3E16_V: "VLSEG3E16.V", + VLSEG3E32FF_V: "VLSEG3E32FF.V", + VLSEG3E32_V: "VLSEG3E32.V", + VLSEG3E64FF_V: "VLSEG3E64FF.V", + VLSEG3E64_V: "VLSEG3E64.V", + VLSEG3E8FF_V: "VLSEG3E8FF.V", + VLSEG3E8_V: "VLSEG3E8.V", + VLSEG4E16FF_V: "VLSEG4E16FF.V", + VLSEG4E16_V: "VLSEG4E16.V", + VLSEG4E32FF_V: "VLSEG4E32FF.V", + VLSEG4E32_V: "VLSEG4E32.V", + VLSEG4E64FF_V: "VLSEG4E64FF.V", + VLSEG4E64_V: "VLSEG4E64.V", + VLSEG4E8FF_V: "VLSEG4E8FF.V", + VLSEG4E8_V: "VLSEG4E8.V", + VLSEG5E16FF_V: "VLSEG5E16FF.V", + VLSEG5E16_V: "VLSEG5E16.V", + VLSEG5E32FF_V: "VLSEG5E32FF.V", + VLSEG5E32_V: "VLSEG5E32.V", + VLSEG5E64FF_V: "VLSEG5E64FF.V", + VLSEG5E64_V: "VLSEG5E64.V", + VLSEG5E8FF_V: "VLSEG5E8FF.V", + VLSEG5E8_V: "VLSEG5E8.V", + VLSEG6E16FF_V: "VLSEG6E16FF.V", + VLSEG6E16_V: "VLSEG6E16.V", + VLSEG6E32FF_V: "VLSEG6E32FF.V", + VLSEG6E32_V: "VLSEG6E32.V", + VLSEG6E64FF_V: "VLSEG6E64FF.V", + VLSEG6E64_V: "VLSEG6E64.V", + VLSEG6E8FF_V: "VLSEG6E8FF.V", + VLSEG6E8_V: "VLSEG6E8.V", + VLSEG7E16FF_V: 
"VLSEG7E16FF.V", + VLSEG7E16_V: "VLSEG7E16.V", + VLSEG7E32FF_V: "VLSEG7E32FF.V", + VLSEG7E32_V: "VLSEG7E32.V", + VLSEG7E64FF_V: "VLSEG7E64FF.V", + VLSEG7E64_V: "VLSEG7E64.V", + VLSEG7E8FF_V: "VLSEG7E8FF.V", + VLSEG7E8_V: "VLSEG7E8.V", + VLSEG8E16FF_V: "VLSEG8E16FF.V", + VLSEG8E16_V: "VLSEG8E16.V", + VLSEG8E32FF_V: "VLSEG8E32FF.V", + VLSEG8E32_V: "VLSEG8E32.V", + VLSEG8E64FF_V: "VLSEG8E64FF.V", + VLSEG8E64_V: "VLSEG8E64.V", + VLSEG8E8FF_V: "VLSEG8E8FF.V", + VLSEG8E8_V: "VLSEG8E8.V", + VLSSEG2E16_V: "VLSSEG2E16.V", + VLSSEG2E32_V: "VLSSEG2E32.V", + VLSSEG2E64_V: "VLSSEG2E64.V", + VLSSEG2E8_V: "VLSSEG2E8.V", + VLSSEG3E16_V: "VLSSEG3E16.V", + VLSSEG3E32_V: "VLSSEG3E32.V", + VLSSEG3E64_V: "VLSSEG3E64.V", + VLSSEG3E8_V: "VLSSEG3E8.V", + VLSSEG4E16_V: "VLSSEG4E16.V", + VLSSEG4E32_V: "VLSSEG4E32.V", + VLSSEG4E64_V: "VLSSEG4E64.V", + VLSSEG4E8_V: "VLSSEG4E8.V", + VLSSEG5E16_V: "VLSSEG5E16.V", + VLSSEG5E32_V: "VLSSEG5E32.V", + VLSSEG5E64_V: "VLSSEG5E64.V", + VLSSEG5E8_V: "VLSSEG5E8.V", + VLSSEG6E16_V: "VLSSEG6E16.V", + VLSSEG6E32_V: "VLSSEG6E32.V", + VLSSEG6E64_V: "VLSSEG6E64.V", + VLSSEG6E8_V: "VLSSEG6E8.V", + VLSSEG7E16_V: "VLSSEG7E16.V", + VLSSEG7E32_V: "VLSSEG7E32.V", + VLSSEG7E64_V: "VLSSEG7E64.V", + VLSSEG7E8_V: "VLSSEG7E8.V", + VLSSEG8E16_V: "VLSSEG8E16.V", + VLSSEG8E32_V: "VLSSEG8E32.V", + VLSSEG8E64_V: "VLSSEG8E64.V", + VLSSEG8E8_V: "VLSSEG8E8.V", + VLUXEI16_V: "VLUXEI16.V", + VLUXEI32_V: "VLUXEI32.V", + VLUXEI64_V: "VLUXEI64.V", + VLUXEI8_V: "VLUXEI8.V", + VLUXSEG2EI16_V: "VLUXSEG2EI16.V", + VLUXSEG2EI32_V: "VLUXSEG2EI32.V", + VLUXSEG2EI64_V: "VLUXSEG2EI64.V", + VLUXSEG2EI8_V: "VLUXSEG2EI8.V", + VLUXSEG3EI16_V: "VLUXSEG3EI16.V", + VLUXSEG3EI32_V: "VLUXSEG3EI32.V", + VLUXSEG3EI64_V: "VLUXSEG3EI64.V", + VLUXSEG3EI8_V: "VLUXSEG3EI8.V", + VLUXSEG4EI16_V: "VLUXSEG4EI16.V", + VLUXSEG4EI32_V: "VLUXSEG4EI32.V", + VLUXSEG4EI64_V: "VLUXSEG4EI64.V", + VLUXSEG4EI8_V: "VLUXSEG4EI8.V", + VLUXSEG5EI16_V: "VLUXSEG5EI16.V", + VLUXSEG5EI32_V: "VLUXSEG5EI32.V", + VLUXSEG5EI64_V: "VLUXSEG5EI64.V", + VLUXSEG5EI8_V: "VLUXSEG5EI8.V", + VLUXSEG6EI16_V: "VLUXSEG6EI16.V", + VLUXSEG6EI32_V: "VLUXSEG6EI32.V", + VLUXSEG6EI64_V: "VLUXSEG6EI64.V", + VLUXSEG6EI8_V: "VLUXSEG6EI8.V", + VLUXSEG7EI16_V: "VLUXSEG7EI16.V", + VLUXSEG7EI32_V: "VLUXSEG7EI32.V", + VLUXSEG7EI64_V: "VLUXSEG7EI64.V", + VLUXSEG7EI8_V: "VLUXSEG7EI8.V", + VLUXSEG8EI16_V: "VLUXSEG8EI16.V", + VLUXSEG8EI32_V: "VLUXSEG8EI32.V", + VLUXSEG8EI64_V: "VLUXSEG8EI64.V", + VLUXSEG8EI8_V: "VLUXSEG8EI8.V", + VMACC_VV: "VMACC.VV", + VMACC_VX: "VMACC.VX", + VMADC_VI: "VMADC.VI", + VMADC_VIM: "VMADC.VIM", + VMADC_VV: "VMADC.VV", + VMADC_VVM: "VMADC.VVM", + VMADC_VX: "VMADC.VX", + VMADC_VXM: "VMADC.VXM", + VMADD_VV: "VMADD.VV", + VMADD_VX: "VMADD.VX", + VMANDN_MM: "VMANDN.MM", + VMAND_MM: "VMAND.MM", + VMAXU_VV: "VMAXU.VV", + VMAXU_VX: "VMAXU.VX", + VMAX_VV: "VMAX.VV", + VMAX_VX: "VMAX.VX", + VMERGE_VIM: "VMERGE.VIM", + VMERGE_VVM: "VMERGE.VVM", + VMERGE_VXM: "VMERGE.VXM", + VMFEQ_VF: "VMFEQ.VF", + VMFEQ_VV: "VMFEQ.VV", + VMFGE_VF: "VMFGE.VF", + VMFGT_VF: "VMFGT.VF", + VMFLE_VF: "VMFLE.VF", + VMFLE_VV: "VMFLE.VV", + VMFLT_VF: "VMFLT.VF", + VMFLT_VV: "VMFLT.VV", + VMFNE_VF: "VMFNE.VF", + VMFNE_VV: "VMFNE.VV", + VMINU_VV: "VMINU.VV", + VMINU_VX: "VMINU.VX", + VMIN_VV: "VMIN.VV", + VMIN_VX: "VMIN.VX", + VMNAND_MM: "VMNAND.MM", + VMNOR_MM: "VMNOR.MM", + VMORN_MM: "VMORN.MM", + VMOR_MM: "VMOR.MM", + VMSBC_VV: "VMSBC.VV", + VMSBC_VVM: "VMSBC.VVM", + VMSBC_VX: "VMSBC.VX", + VMSBC_VXM: "VMSBC.VXM", + VMSBF_M: "VMSBF.M", + VMSEQ_VI: "VMSEQ.VI", + VMSEQ_VV: "VMSEQ.VV", + VMSEQ_VX: 
"VMSEQ.VX", + VMSGTU_VI: "VMSGTU.VI", + VMSGTU_VX: "VMSGTU.VX", + VMSGT_VI: "VMSGT.VI", + VMSGT_VX: "VMSGT.VX", + VMSIF_M: "VMSIF.M", + VMSLEU_VI: "VMSLEU.VI", + VMSLEU_VV: "VMSLEU.VV", + VMSLEU_VX: "VMSLEU.VX", + VMSLE_VI: "VMSLE.VI", + VMSLE_VV: "VMSLE.VV", + VMSLE_VX: "VMSLE.VX", + VMSLTU_VV: "VMSLTU.VV", + VMSLTU_VX: "VMSLTU.VX", + VMSLT_VV: "VMSLT.VV", + VMSLT_VX: "VMSLT.VX", + VMSNE_VI: "VMSNE.VI", + VMSNE_VV: "VMSNE.VV", + VMSNE_VX: "VMSNE.VX", + VMSOF_M: "VMSOF.M", + VMULHSU_VV: "VMULHSU.VV", + VMULHSU_VX: "VMULHSU.VX", + VMULHU_VV: "VMULHU.VV", + VMULHU_VX: "VMULHU.VX", + VMULH_VV: "VMULH.VV", + VMULH_VX: "VMULH.VX", + VMUL_VV: "VMUL.VV", + VMUL_VX: "VMUL.VX", + VMV1R_V: "VMV1R.V", + VMV2R_V: "VMV2R.V", + VMV4R_V: "VMV4R.V", + VMV8R_V: "VMV8R.V", + VMV_S_X: "VMV.S.X", + VMV_V_I: "VMV.V.I", + VMV_V_V: "VMV.V.V", + VMV_V_X: "VMV.V.X", + VMV_X_S: "VMV.X.S", + VMXNOR_MM: "VMXNOR.MM", + VMXOR_MM: "VMXOR.MM", + VNCLIPU_WI: "VNCLIPU.WI", + VNCLIPU_WV: "VNCLIPU.WV", + VNCLIPU_WX: "VNCLIPU.WX", + VNCLIP_WI: "VNCLIP.WI", + VNCLIP_WV: "VNCLIP.WV", + VNCLIP_WX: "VNCLIP.WX", + VNMSAC_VV: "VNMSAC.VV", + VNMSAC_VX: "VNMSAC.VX", + VNMSUB_VV: "VNMSUB.VV", + VNMSUB_VX: "VNMSUB.VX", + VNSRA_WI: "VNSRA.WI", + VNSRA_WV: "VNSRA.WV", + VNSRA_WX: "VNSRA.WX", + VNSRL_WI: "VNSRL.WI", + VNSRL_WV: "VNSRL.WV", + VNSRL_WX: "VNSRL.WX", + VOR_VI: "VOR.VI", + VOR_VV: "VOR.VV", + VOR_VX: "VOR.VX", + VREDAND_VS: "VREDAND.VS", + VREDMAXU_VS: "VREDMAXU.VS", + VREDMAX_VS: "VREDMAX.VS", + VREDMINU_VS: "VREDMINU.VS", + VREDMIN_VS: "VREDMIN.VS", + VREDOR_VS: "VREDOR.VS", + VREDSUM_VS: "VREDSUM.VS", + VREDXOR_VS: "VREDXOR.VS", + VREMU_VV: "VREMU.VV", + VREMU_VX: "VREMU.VX", + VREM_VV: "VREM.VV", + VREM_VX: "VREM.VX", + VRGATHEREI16_VV: "VRGATHEREI16.VV", + VRGATHER_VI: "VRGATHER.VI", + VRGATHER_VV: "VRGATHER.VV", + VRGATHER_VX: "VRGATHER.VX", + VRSUB_VI: "VRSUB.VI", + VRSUB_VX: "VRSUB.VX", + VS1R_V: "VS1R.V", + VS2R_V: "VS2R.V", + VS4R_V: "VS4R.V", + VS8R_V: "VS8R.V", + VSADDU_VI: "VSADDU.VI", + VSADDU_VV: "VSADDU.VV", + VSADDU_VX: "VSADDU.VX", + VSADD_VI: "VSADD.VI", + VSADD_VV: "VSADD.VV", + VSADD_VX: "VSADD.VX", + VSBC_VVM: "VSBC.VVM", + VSBC_VXM: "VSBC.VXM", + VSE16_V: "VSE16.V", + VSE32_V: "VSE32.V", + VSE64_V: "VSE64.V", + VSE8_V: "VSE8.V", + VSETIVLI: "VSETIVLI", + VSETVL: "VSETVL", + VSETVLI: "VSETVLI", + VSEXT_VF2: "VSEXT.VF2", + VSEXT_VF4: "VSEXT.VF4", + VSEXT_VF8: "VSEXT.VF8", + VSLIDE1DOWN_VX: "VSLIDE1DOWN.VX", + VSLIDE1UP_VX: "VSLIDE1UP.VX", + VSLIDEDOWN_VI: "VSLIDEDOWN.VI", + VSLIDEDOWN_VX: "VSLIDEDOWN.VX", + VSLIDEUP_VI: "VSLIDEUP.VI", + VSLIDEUP_VX: "VSLIDEUP.VX", + VSLL_VI: "VSLL.VI", + VSLL_VV: "VSLL.VV", + VSLL_VX: "VSLL.VX", + VSMUL_VV: "VSMUL.VV", + VSMUL_VX: "VSMUL.VX", + VSM_V: "VSM.V", + VSOXEI16_V: "VSOXEI16.V", + VSOXEI32_V: "VSOXEI32.V", + VSOXEI64_V: "VSOXEI64.V", + VSOXEI8_V: "VSOXEI8.V", + VSOXSEG2EI16_V: "VSOXSEG2EI16.V", + VSOXSEG2EI32_V: "VSOXSEG2EI32.V", + VSOXSEG2EI64_V: "VSOXSEG2EI64.V", + VSOXSEG2EI8_V: "VSOXSEG2EI8.V", + VSOXSEG3EI16_V: "VSOXSEG3EI16.V", + VSOXSEG3EI32_V: "VSOXSEG3EI32.V", + VSOXSEG3EI64_V: "VSOXSEG3EI64.V", + VSOXSEG3EI8_V: "VSOXSEG3EI8.V", + VSOXSEG4EI16_V: "VSOXSEG4EI16.V", + VSOXSEG4EI32_V: "VSOXSEG4EI32.V", + VSOXSEG4EI64_V: "VSOXSEG4EI64.V", + VSOXSEG4EI8_V: "VSOXSEG4EI8.V", + VSOXSEG5EI16_V: "VSOXSEG5EI16.V", + VSOXSEG5EI32_V: "VSOXSEG5EI32.V", + VSOXSEG5EI64_V: "VSOXSEG5EI64.V", + VSOXSEG5EI8_V: "VSOXSEG5EI8.V", + VSOXSEG6EI16_V: "VSOXSEG6EI16.V", + VSOXSEG6EI32_V: "VSOXSEG6EI32.V", + VSOXSEG6EI64_V: "VSOXSEG6EI64.V", + VSOXSEG6EI8_V: "VSOXSEG6EI8.V", + 
VSOXSEG7EI16_V: "VSOXSEG7EI16.V", + VSOXSEG7EI32_V: "VSOXSEG7EI32.V", + VSOXSEG7EI64_V: "VSOXSEG7EI64.V", + VSOXSEG7EI8_V: "VSOXSEG7EI8.V", + VSOXSEG8EI16_V: "VSOXSEG8EI16.V", + VSOXSEG8EI32_V: "VSOXSEG8EI32.V", + VSOXSEG8EI64_V: "VSOXSEG8EI64.V", + VSOXSEG8EI8_V: "VSOXSEG8EI8.V", + VSRA_VI: "VSRA.VI", + VSRA_VV: "VSRA.VV", + VSRA_VX: "VSRA.VX", + VSRL_VI: "VSRL.VI", + VSRL_VV: "VSRL.VV", + VSRL_VX: "VSRL.VX", + VSSE16_V: "VSSE16.V", + VSSE32_V: "VSSE32.V", + VSSE64_V: "VSSE64.V", + VSSE8_V: "VSSE8.V", + VSSEG2E16_V: "VSSEG2E16.V", + VSSEG2E32_V: "VSSEG2E32.V", + VSSEG2E64_V: "VSSEG2E64.V", + VSSEG2E8_V: "VSSEG2E8.V", + VSSEG3E16_V: "VSSEG3E16.V", + VSSEG3E32_V: "VSSEG3E32.V", + VSSEG3E64_V: "VSSEG3E64.V", + VSSEG3E8_V: "VSSEG3E8.V", + VSSEG4E16_V: "VSSEG4E16.V", + VSSEG4E32_V: "VSSEG4E32.V", + VSSEG4E64_V: "VSSEG4E64.V", + VSSEG4E8_V: "VSSEG4E8.V", + VSSEG5E16_V: "VSSEG5E16.V", + VSSEG5E32_V: "VSSEG5E32.V", + VSSEG5E64_V: "VSSEG5E64.V", + VSSEG5E8_V: "VSSEG5E8.V", + VSSEG6E16_V: "VSSEG6E16.V", + VSSEG6E32_V: "VSSEG6E32.V", + VSSEG6E64_V: "VSSEG6E64.V", + VSSEG6E8_V: "VSSEG6E8.V", + VSSEG7E16_V: "VSSEG7E16.V", + VSSEG7E32_V: "VSSEG7E32.V", + VSSEG7E64_V: "VSSEG7E64.V", + VSSEG7E8_V: "VSSEG7E8.V", + VSSEG8E16_V: "VSSEG8E16.V", + VSSEG8E32_V: "VSSEG8E32.V", + VSSEG8E64_V: "VSSEG8E64.V", + VSSEG8E8_V: "VSSEG8E8.V", + VSSRA_VI: "VSSRA.VI", + VSSRA_VV: "VSSRA.VV", + VSSRA_VX: "VSSRA.VX", + VSSRL_VI: "VSSRL.VI", + VSSRL_VV: "VSSRL.VV", + VSSRL_VX: "VSSRL.VX", + VSSSEG2E16_V: "VSSSEG2E16.V", + VSSSEG2E32_V: "VSSSEG2E32.V", + VSSSEG2E64_V: "VSSSEG2E64.V", + VSSSEG2E8_V: "VSSSEG2E8.V", + VSSSEG3E16_V: "VSSSEG3E16.V", + VSSSEG3E32_V: "VSSSEG3E32.V", + VSSSEG3E64_V: "VSSSEG3E64.V", + VSSSEG3E8_V: "VSSSEG3E8.V", + VSSSEG4E16_V: "VSSSEG4E16.V", + VSSSEG4E32_V: "VSSSEG4E32.V", + VSSSEG4E64_V: "VSSSEG4E64.V", + VSSSEG4E8_V: "VSSSEG4E8.V", + VSSSEG5E16_V: "VSSSEG5E16.V", + VSSSEG5E32_V: "VSSSEG5E32.V", + VSSSEG5E64_V: "VSSSEG5E64.V", + VSSSEG5E8_V: "VSSSEG5E8.V", + VSSSEG6E16_V: "VSSSEG6E16.V", + VSSSEG6E32_V: "VSSSEG6E32.V", + VSSSEG6E64_V: "VSSSEG6E64.V", + VSSSEG6E8_V: "VSSSEG6E8.V", + VSSSEG7E16_V: "VSSSEG7E16.V", + VSSSEG7E32_V: "VSSSEG7E32.V", + VSSSEG7E64_V: "VSSSEG7E64.V", + VSSSEG7E8_V: "VSSSEG7E8.V", + VSSSEG8E16_V: "VSSSEG8E16.V", + VSSSEG8E32_V: "VSSSEG8E32.V", + VSSSEG8E64_V: "VSSSEG8E64.V", + VSSSEG8E8_V: "VSSSEG8E8.V", + VSSUBU_VV: "VSSUBU.VV", + VSSUBU_VX: "VSSUBU.VX", + VSSUB_VV: "VSSUB.VV", + VSSUB_VX: "VSSUB.VX", + VSUB_VV: "VSUB.VV", + VSUB_VX: "VSUB.VX", + VSUXEI16_V: "VSUXEI16.V", + VSUXEI32_V: "VSUXEI32.V", + VSUXEI64_V: "VSUXEI64.V", + VSUXEI8_V: "VSUXEI8.V", + VSUXSEG2EI16_V: "VSUXSEG2EI16.V", + VSUXSEG2EI32_V: "VSUXSEG2EI32.V", + VSUXSEG2EI64_V: "VSUXSEG2EI64.V", + VSUXSEG2EI8_V: "VSUXSEG2EI8.V", + VSUXSEG3EI16_V: "VSUXSEG3EI16.V", + VSUXSEG3EI32_V: "VSUXSEG3EI32.V", + VSUXSEG3EI64_V: "VSUXSEG3EI64.V", + VSUXSEG3EI8_V: "VSUXSEG3EI8.V", + VSUXSEG4EI16_V: "VSUXSEG4EI16.V", + VSUXSEG4EI32_V: "VSUXSEG4EI32.V", + VSUXSEG4EI64_V: "VSUXSEG4EI64.V", + VSUXSEG4EI8_V: "VSUXSEG4EI8.V", + VSUXSEG5EI16_V: "VSUXSEG5EI16.V", + VSUXSEG5EI32_V: "VSUXSEG5EI32.V", + VSUXSEG5EI64_V: "VSUXSEG5EI64.V", + VSUXSEG5EI8_V: "VSUXSEG5EI8.V", + VSUXSEG6EI16_V: "VSUXSEG6EI16.V", + VSUXSEG6EI32_V: "VSUXSEG6EI32.V", + VSUXSEG6EI64_V: "VSUXSEG6EI64.V", + VSUXSEG6EI8_V: "VSUXSEG6EI8.V", + VSUXSEG7EI16_V: "VSUXSEG7EI16.V", + VSUXSEG7EI32_V: "VSUXSEG7EI32.V", + VSUXSEG7EI64_V: "VSUXSEG7EI64.V", + VSUXSEG7EI8_V: "VSUXSEG7EI8.V", + VSUXSEG8EI16_V: "VSUXSEG8EI16.V", + VSUXSEG8EI32_V: "VSUXSEG8EI32.V", + 
VSUXSEG8EI64_V: "VSUXSEG8EI64.V", + VSUXSEG8EI8_V: "VSUXSEG8EI8.V", + VWADDU_VV: "VWADDU.VV", + VWADDU_VX: "VWADDU.VX", + VWADDU_WV: "VWADDU.WV", + VWADDU_WX: "VWADDU.WX", + VWADD_VV: "VWADD.VV", + VWADD_VX: "VWADD.VX", + VWADD_WV: "VWADD.WV", + VWADD_WX: "VWADD.WX", + VWMACCSU_VV: "VWMACCSU.VV", + VWMACCSU_VX: "VWMACCSU.VX", + VWMACCUS_VX: "VWMACCUS.VX", + VWMACCU_VV: "VWMACCU.VV", + VWMACCU_VX: "VWMACCU.VX", + VWMACC_VV: "VWMACC.VV", + VWMACC_VX: "VWMACC.VX", + VWMULSU_VV: "VWMULSU.VV", + VWMULSU_VX: "VWMULSU.VX", + VWMULU_VV: "VWMULU.VV", + VWMULU_VX: "VWMULU.VX", + VWMUL_VV: "VWMUL.VV", + VWMUL_VX: "VWMUL.VX", + VWREDSUMU_VS: "VWREDSUMU.VS", + VWREDSUM_VS: "VWREDSUM.VS", + VWSUBU_VV: "VWSUBU.VV", + VWSUBU_VX: "VWSUBU.VX", + VWSUBU_WV: "VWSUBU.WV", + VWSUBU_WX: "VWSUBU.WX", + VWSUB_VV: "VWSUB.VV", + VWSUB_VX: "VWSUB.VX", + VWSUB_WV: "VWSUB.WV", + VWSUB_WX: "VWSUB.WX", + VXOR_VI: "VXOR.VI", + VXOR_VV: "VXOR.VV", + VXOR_VX: "VXOR.VX", + VZEXT_VF2: "VZEXT.VF2", + VZEXT_VF4: "VZEXT.VF4", + VZEXT_VF8: "VZEXT.VF8", + XNOR: "XNOR", + XOR: "XOR", + XORI: "XORI", + ZEXT_H: "ZEXT.H", } var instFormats = [...]instFormat{ @@ -753,150 +2011,150 @@ var instFormats = [...]instFormat{ {mask: 0xfe00707f, value: 0x0000003b, op: ADDW, args: argTypeList{arg_rd, arg_rs1, arg_rs2}}, // ADD.UW rd, rs1, rs2 {mask: 0xfe00707f, value: 0x0800003b, op: ADD_UW, args: argTypeList{arg_rd, arg_rs1, arg_rs2}}, - // AMOADD.D rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x0000302f, op: AMOADD_D, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOADD.D.AQ rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x0400302f, op: AMOADD_D_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOADD.D.AQRL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x0600302f, op: AMOADD_D_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOADD.D.RL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x0200302f, op: AMOADD_D_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOADD.W rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x0000202f, op: AMOADD_W, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOADD.W.AQ rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x0400202f, op: AMOADD_W_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOADD.W.AQRL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x0600202f, op: AMOADD_W_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOADD.W.RL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x0200202f, op: AMOADD_W_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOAND.D rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x6000302f, op: AMOAND_D, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOAND.D.AQ rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x6400302f, op: AMOAND_D_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOAND.D.AQRL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x6600302f, op: AMOAND_D_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOAND.D.RL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x6200302f, op: AMOAND_D_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOAND.W rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x6000202f, op: AMOAND_W, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOAND.W.AQ rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x6400202f, op: AMOAND_W_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOAND.W.AQRL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x6600202f, op: AMOAND_W_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOAND.W.RL rd, rs2, rs1_amo - {mask: 
0xfe00707f, value: 0x6200202f, op: AMOAND_W_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMAXU.D rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0xe000302f, op: AMOMAXU_D, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMAXU.D.AQ rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0xe400302f, op: AMOMAXU_D_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMAXU.D.AQRL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0xe600302f, op: AMOMAXU_D_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMAXU.D.RL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0xe200302f, op: AMOMAXU_D_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMAXU.W rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0xe000202f, op: AMOMAXU_W, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMAXU.W.AQ rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0xe400202f, op: AMOMAXU_W_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMAXU.W.AQRL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0xe600202f, op: AMOMAXU_W_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMAXU.W.RL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0xe200202f, op: AMOMAXU_W_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMAX.D rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0xa000302f, op: AMOMAX_D, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMAX.D.AQ rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0xa400302f, op: AMOMAX_D_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMAX.D.AQRL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0xa600302f, op: AMOMAX_D_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMAX.D.RL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0xa200302f, op: AMOMAX_D_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMAX.W rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0xa000202f, op: AMOMAX_W, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMAX.W.AQ rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0xa400202f, op: AMOMAX_W_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMAX.W.AQRL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0xa600202f, op: AMOMAX_W_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMAX.W.RL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0xa200202f, op: AMOMAX_W_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMINU.D rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0xc000302f, op: AMOMINU_D, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMINU.D.AQ rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0xc400302f, op: AMOMINU_D_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMINU.D.AQRL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0xc600302f, op: AMOMINU_D_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMINU.D.RL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0xc200302f, op: AMOMINU_D_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMINU.W rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0xc000202f, op: AMOMINU_W, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMINU.W.AQ rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0xc400202f, op: AMOMINU_W_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMINU.W.AQRL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0xc600202f, op: AMOMINU_W_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMINU.W.RL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0xc200202f, op: AMOMINU_W_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMIN.D rd, rs2, rs1_amo - {mask: 0xfe00707f, 
value: 0x8000302f, op: AMOMIN_D, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMIN.D.AQ rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x8400302f, op: AMOMIN_D_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMIN.D.AQRL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x8600302f, op: AMOMIN_D_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMIN.D.RL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x8200302f, op: AMOMIN_D_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMIN.W rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x8000202f, op: AMOMIN_W, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMIN.W.AQ rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x8400202f, op: AMOMIN_W_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMIN.W.AQRL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x8600202f, op: AMOMIN_W_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOMIN.W.RL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x8200202f, op: AMOMIN_W_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOOR.D rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x4000302f, op: AMOOR_D, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOOR.D.AQ rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x4400302f, op: AMOOR_D_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOOR.D.AQRL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x4600302f, op: AMOOR_D_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOOR.D.RL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x4200302f, op: AMOOR_D_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOOR.W rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x4000202f, op: AMOOR_W, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOOR.W.AQ rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x4400202f, op: AMOOR_W_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOOR.W.AQRL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x4600202f, op: AMOOR_W_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOOR.W.RL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x4200202f, op: AMOOR_W_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOSWAP.D rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x0800302f, op: AMOSWAP_D, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOSWAP.D.AQ rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x0c00302f, op: AMOSWAP_D_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOSWAP.D.AQRL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x0e00302f, op: AMOSWAP_D_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOSWAP.D.RL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x0a00302f, op: AMOSWAP_D_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOSWAP.W rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x0800202f, op: AMOSWAP_W, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOSWAP.W.AQ rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x0c00202f, op: AMOSWAP_W_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOSWAP.W.AQRL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x0e00202f, op: AMOSWAP_W_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOSWAP.W.RL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x0a00202f, op: AMOSWAP_W_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOXOR.D rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x2000302f, op: AMOXOR_D, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOXOR.D.AQ rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x2400302f, op: AMOXOR_D_AQ, args: 
argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOXOR.D.AQRL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x2600302f, op: AMOXOR_D_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOXOR.D.RL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x2200302f, op: AMOXOR_D_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOXOR.W rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x2000202f, op: AMOXOR_W, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOXOR.W.AQ rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x2400202f, op: AMOXOR_W_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOXOR.W.AQRL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x2600202f, op: AMOXOR_W_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // AMOXOR.W.RL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x2200202f, op: AMOXOR_W_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, + // AMOADD.D rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x0000302f, op: AMOADD_D, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOADD.D.AQ rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x0400302f, op: AMOADD_D_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOADD.D.AQRL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x0600302f, op: AMOADD_D_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOADD.D.RL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x0200302f, op: AMOADD_D_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOADD.W rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x0000202f, op: AMOADD_W, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOADD.W.AQ rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x0400202f, op: AMOADD_W_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOADD.W.AQRL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x0600202f, op: AMOADD_W_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOADD.W.RL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x0200202f, op: AMOADD_W_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOAND.D rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x6000302f, op: AMOAND_D, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOAND.D.AQ rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x6400302f, op: AMOAND_D_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOAND.D.AQRL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x6600302f, op: AMOAND_D_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOAND.D.RL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x6200302f, op: AMOAND_D_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOAND.W rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x6000202f, op: AMOAND_W, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOAND.W.AQ rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x6400202f, op: AMOAND_W_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOAND.W.AQRL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x6600202f, op: AMOAND_W_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOAND.W.RL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x6200202f, op: AMOAND_W_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMAXU.D rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0xe000302f, op: AMOMAXU_D, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMAXU.D.AQ rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0xe400302f, op: AMOMAXU_D_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMAXU.D.AQRL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0xe600302f, op: AMOMAXU_D_AQRL, args: argTypeList{arg_rd, arg_rs2, 
arg_rs1_ptr}}, + // AMOMAXU.D.RL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0xe200302f, op: AMOMAXU_D_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMAXU.W rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0xe000202f, op: AMOMAXU_W, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMAXU.W.AQ rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0xe400202f, op: AMOMAXU_W_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMAXU.W.AQRL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0xe600202f, op: AMOMAXU_W_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMAXU.W.RL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0xe200202f, op: AMOMAXU_W_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMAX.D rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0xa000302f, op: AMOMAX_D, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMAX.D.AQ rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0xa400302f, op: AMOMAX_D_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMAX.D.AQRL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0xa600302f, op: AMOMAX_D_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMAX.D.RL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0xa200302f, op: AMOMAX_D_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMAX.W rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0xa000202f, op: AMOMAX_W, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMAX.W.AQ rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0xa400202f, op: AMOMAX_W_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMAX.W.AQRL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0xa600202f, op: AMOMAX_W_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMAX.W.RL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0xa200202f, op: AMOMAX_W_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMINU.D rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0xc000302f, op: AMOMINU_D, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMINU.D.AQ rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0xc400302f, op: AMOMINU_D_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMINU.D.AQRL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0xc600302f, op: AMOMINU_D_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMINU.D.RL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0xc200302f, op: AMOMINU_D_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMINU.W rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0xc000202f, op: AMOMINU_W, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMINU.W.AQ rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0xc400202f, op: AMOMINU_W_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMINU.W.AQRL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0xc600202f, op: AMOMINU_W_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMINU.W.RL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0xc200202f, op: AMOMINU_W_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMIN.D rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x8000302f, op: AMOMIN_D, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMIN.D.AQ rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x8400302f, op: AMOMIN_D_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMIN.D.AQRL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x8600302f, op: AMOMIN_D_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMIN.D.RL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x8200302f, op: AMOMIN_D_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, 
+ // AMOMIN.W rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x8000202f, op: AMOMIN_W, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMIN.W.AQ rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x8400202f, op: AMOMIN_W_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMIN.W.AQRL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x8600202f, op: AMOMIN_W_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOMIN.W.RL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x8200202f, op: AMOMIN_W_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOOR.D rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x4000302f, op: AMOOR_D, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOOR.D.AQ rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x4400302f, op: AMOOR_D_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOOR.D.AQRL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x4600302f, op: AMOOR_D_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOOR.D.RL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x4200302f, op: AMOOR_D_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOOR.W rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x4000202f, op: AMOOR_W, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOOR.W.AQ rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x4400202f, op: AMOOR_W_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOOR.W.AQRL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x4600202f, op: AMOOR_W_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOOR.W.RL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x4200202f, op: AMOOR_W_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOSWAP.D rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x0800302f, op: AMOSWAP_D, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOSWAP.D.AQ rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x0c00302f, op: AMOSWAP_D_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOSWAP.D.AQRL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x0e00302f, op: AMOSWAP_D_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOSWAP.D.RL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x0a00302f, op: AMOSWAP_D_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOSWAP.W rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x0800202f, op: AMOSWAP_W, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOSWAP.W.AQ rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x0c00202f, op: AMOSWAP_W_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOSWAP.W.AQRL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x0e00202f, op: AMOSWAP_W_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOSWAP.W.RL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x0a00202f, op: AMOSWAP_W_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOXOR.D rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x2000302f, op: AMOXOR_D, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOXOR.D.AQ rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x2400302f, op: AMOXOR_D_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOXOR.D.AQRL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x2600302f, op: AMOXOR_D_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOXOR.D.RL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x2200302f, op: AMOXOR_D_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOXOR.W rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x2000202f, op: AMOXOR_W, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOXOR.W.AQ rd, rs2, rs1_ptr + {mask: 
0xfe00707f, value: 0x2400202f, op: AMOXOR_W_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOXOR.W.AQRL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x2600202f, op: AMOXOR_W_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // AMOXOR.W.RL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x2200202f, op: AMOXOR_W_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, // AND rd, rs1, rs2 {mask: 0xfe00707f, value: 0x00007033, op: AND, args: argTypeList{arg_rd, arg_rs1, arg_rs2}}, // ANDI rd, rs1, imm12 @@ -957,6 +2215,10 @@ var instFormats = [...]instFormat{ {mask: 0xfff0707f, value: 0x60101013, op: CTZ, args: argTypeList{arg_rd, arg_rs1}}, // CTZW rd, rs1 {mask: 0xfff0707f, value: 0x6010101b, op: CTZW, args: argTypeList{arg_rd, arg_rs1}}, + // CZERO.EQZ rd, rs1, rs2 + {mask: 0xfe00707f, value: 0x0e005033, op: CZERO_EQZ, args: argTypeList{arg_rd, arg_rs1, arg_rs2}}, + // CZERO.NEZ rd, rs1, rs2 + {mask: 0xfe00707f, value: 0x0e007033, op: CZERO_NEZ, args: argTypeList{arg_rd, arg_rs1, arg_rs2}}, // C.ADD rd_rs1_n0, c_rs2_n0 {mask: 0x0000f003, value: 0x00009002, op: C_ADD, args: argTypeList{arg_rd_rs1_n0, arg_c_rs2_n0}}, // C.ADDI rd_rs1_n0, c_nzimm6 @@ -1315,22 +2577,22 @@ var instFormats = [...]instFormat{ {mask: 0x0000707f, value: 0x00001003, op: LH, args: argTypeList{arg_rd, arg_rs1_mem}}, // LHU rd, rs1_mem {mask: 0x0000707f, value: 0x00005003, op: LHU, args: argTypeList{arg_rd, arg_rs1_mem}}, - // LR.D rd, rs1_amo - {mask: 0xfff0707f, value: 0x1000302f, op: LR_D, args: argTypeList{arg_rd, arg_rs1_amo}}, - // LR.D.AQ rd, rs1_amo - {mask: 0xfff0707f, value: 0x1400302f, op: LR_D_AQ, args: argTypeList{arg_rd, arg_rs1_amo}}, - // LR.D.AQRL rd, rs1_amo - {mask: 0xfff0707f, value: 0x1600302f, op: LR_D_AQRL, args: argTypeList{arg_rd, arg_rs1_amo}}, - // LR.D.RL rd, rs1_amo - {mask: 0xfff0707f, value: 0x1200302f, op: LR_D_RL, args: argTypeList{arg_rd, arg_rs1_amo}}, - // LR.W rd, rs1_amo - {mask: 0xfff0707f, value: 0x1000202f, op: LR_W, args: argTypeList{arg_rd, arg_rs1_amo}}, - // LR.W.AQ rd, rs1_amo - {mask: 0xfff0707f, value: 0x1400202f, op: LR_W_AQ, args: argTypeList{arg_rd, arg_rs1_amo}}, - // LR.W.AQRL rd, rs1_amo - {mask: 0xfff0707f, value: 0x1600202f, op: LR_W_AQRL, args: argTypeList{arg_rd, arg_rs1_amo}}, - // LR.W.RL rd, rs1_amo - {mask: 0xfff0707f, value: 0x1200202f, op: LR_W_RL, args: argTypeList{arg_rd, arg_rs1_amo}}, + // LR.D rd, rs1_ptr + {mask: 0xfff0707f, value: 0x1000302f, op: LR_D, args: argTypeList{arg_rd, arg_rs1_ptr}}, + // LR.D.AQ rd, rs1_ptr + {mask: 0xfff0707f, value: 0x1400302f, op: LR_D_AQ, args: argTypeList{arg_rd, arg_rs1_ptr}}, + // LR.D.AQRL rd, rs1_ptr + {mask: 0xfff0707f, value: 0x1600302f, op: LR_D_AQRL, args: argTypeList{arg_rd, arg_rs1_ptr}}, + // LR.D.RL rd, rs1_ptr + {mask: 0xfff0707f, value: 0x1200302f, op: LR_D_RL, args: argTypeList{arg_rd, arg_rs1_ptr}}, + // LR.W rd, rs1_ptr + {mask: 0xfff0707f, value: 0x1000202f, op: LR_W, args: argTypeList{arg_rd, arg_rs1_ptr}}, + // LR.W.AQ rd, rs1_ptr + {mask: 0xfff0707f, value: 0x1400202f, op: LR_W_AQ, args: argTypeList{arg_rd, arg_rs1_ptr}}, + // LR.W.AQRL rd, rs1_ptr + {mask: 0xfff0707f, value: 0x1600202f, op: LR_W_AQRL, args: argTypeList{arg_rd, arg_rs1_ptr}}, + // LR.W.RL rd, rs1_ptr + {mask: 0xfff0707f, value: 0x1200202f, op: LR_W_RL, args: argTypeList{arg_rd, arg_rs1_ptr}}, // LUI rd, imm20 {mask: 0x0000007f, value: 0x00000037, op: LUI, args: argTypeList{arg_rd, arg_imm20}}, // LW rd, rs1_mem @@ -1387,22 +2649,22 @@ var instFormats = [...]instFormat{ {mask: 0xfe00707f, value: 0x6000503b, op: RORW, 
args: argTypeList{arg_rd, arg_rs1, arg_rs2}}, // SB rs2, rs1_store {mask: 0x0000707f, value: 0x00000023, op: SB, args: argTypeList{arg_rs2, arg_rs1_store}}, - // SC.D rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x1800302f, op: SC_D, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // SC.D.AQ rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x1c00302f, op: SC_D_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // SC.D.AQRL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x1e00302f, op: SC_D_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // SC.D.RL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x1a00302f, op: SC_D_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // SC.W rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x1800202f, op: SC_W, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // SC.W.AQ rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x1c00202f, op: SC_W_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // SC.W.AQRL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x1e00202f, op: SC_W_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, - // SC.W.RL rd, rs2, rs1_amo - {mask: 0xfe00707f, value: 0x1a00202f, op: SC_W_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_amo}}, + // SC.D rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x1800302f, op: SC_D, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // SC.D.AQ rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x1c00302f, op: SC_D_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // SC.D.AQRL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x1e00302f, op: SC_D_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // SC.D.RL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x1a00302f, op: SC_D_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // SC.W rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x1800202f, op: SC_W, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // SC.W.AQ rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x1c00202f, op: SC_W_AQ, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // SC.W.AQRL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x1e00202f, op: SC_W_AQRL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, + // SC.W.RL rd, rs2, rs1_ptr + {mask: 0xfe00707f, value: 0x1a00202f, op: SC_W_RL, args: argTypeList{arg_rd, arg_rs2, arg_rs1_ptr}}, // SD rs2, rs1_store {mask: 0x0000707f, value: 0x00003023, op: SD, args: argTypeList{arg_rs2, arg_rs1_store}}, // SEXT.B rd, rs1 @@ -1463,6 +2725,1260 @@ var instFormats = [...]instFormat{ {mask: 0xfe00707f, value: 0x4000003b, op: SUBW, args: argTypeList{arg_rd, arg_rs1, arg_rs2}}, // SW rs2, rs1_store {mask: 0x0000707f, value: 0x00002023, op: SW, args: argTypeList{arg_rs2, arg_rs1_store}}, + // VAADDU.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x20002057, op: VAADDU_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VAADDU.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x20006057, op: VAADDU_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VAADD.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x24002057, op: VAADD_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VAADD.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x24006057, op: VAADD_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VADC.VIM vs2, simm5, vd + {mask: 0xfe00707f, value: 0x40003057, op: VADC_VIM, args: argTypeList{arg_vs2, arg_simm5, arg_vd}}, + // VADC.VVM vs2, vs1, vd + {mask: 0xfe00707f, value: 0x40000057, op: VADC_VVM, args: argTypeList{arg_vs2, arg_vs1, arg_vd}}, + // VADC.VXM vs2, rs1, vd + {mask: 0xfe00707f, value: 
0x40004057, op: VADC_VXM, args: argTypeList{arg_vs2, arg_rs1, arg_vd}}, + // VADD.VI vm, vs2, simm5, vd + {mask: 0xfc00707f, value: 0x00003057, op: VADD_VI, args: argTypeList{arg_vm, arg_vs2, arg_simm5, arg_vd}}, + // VADD.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x00000057, op: VADD_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VADD.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x00004057, op: VADD_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VAND.VI vm, vs2, simm5, vd + {mask: 0xfc00707f, value: 0x24003057, op: VAND_VI, args: argTypeList{arg_vm, arg_vs2, arg_simm5, arg_vd}}, + // VAND.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x24000057, op: VAND_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VAND.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x24004057, op: VAND_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VASUBU.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x28002057, op: VASUBU_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VASUBU.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x28006057, op: VASUBU_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VASUB.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x2c002057, op: VASUB_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VASUB.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x2c006057, op: VASUB_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VCOMPRESS.VM vs2, vs1, vd + {mask: 0xfe00707f, value: 0x5e002057, op: VCOMPRESS_VM, args: argTypeList{arg_vs2, arg_vs1, arg_vd}}, + // VCPOP.M vm, vs2, rd + {mask: 0xfc0ff07f, value: 0x40082057, op: VCPOP_M, args: argTypeList{arg_vm, arg_vs2, arg_rd}}, + // VDIVU.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x80002057, op: VDIVU_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VDIVU.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x80006057, op: VDIVU_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VDIV.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x84002057, op: VDIV_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VDIV.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x84006057, op: VDIV_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VFADD.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0x00005057, op: VFADD_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFADD.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x00001057, op: VFADD_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFCLASS.V vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x4c081057, op: VFCLASS_V, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFCVT.F.XU.V vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x48011057, op: VFCVT_F_XU_V, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFCVT.F.X.V vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x48019057, op: VFCVT_F_X_V, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFCVT.RTZ.XU.F.V vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x48031057, op: VFCVT_RTZ_XU_F_V, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFCVT.RTZ.X.F.V vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x48039057, op: VFCVT_RTZ_X_F_V, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFCVT.XU.F.V vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x48001057, op: VFCVT_XU_F_V, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFCVT.X.F.V vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x48009057, op: VFCVT_X_F_V, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFDIV.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 
0x80005057, op: VFDIV_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFDIV.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x80001057, op: VFDIV_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFIRST.M vm, vs2, rd + {mask: 0xfc0ff07f, value: 0x4008a057, op: VFIRST_M, args: argTypeList{arg_vm, arg_vs2, arg_rd}}, + // VFMACC.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0xb0005057, op: VFMACC_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFMACC.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xb0001057, op: VFMACC_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFMADD.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0xa0005057, op: VFMADD_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFMADD.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xa0001057, op: VFMADD_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFMAX.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0x18005057, op: VFMAX_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFMAX.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x18001057, op: VFMAX_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFMERGE.VFM vs2, fs1, vd + {mask: 0xfe00707f, value: 0x5c005057, op: VFMERGE_VFM, args: argTypeList{arg_vs2, arg_fs1, arg_vd}}, + // VFMIN.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0x10005057, op: VFMIN_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFMIN.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x10001057, op: VFMIN_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFMSAC.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0xb8005057, op: VFMSAC_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFMSAC.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xb8001057, op: VFMSAC_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFMSUB.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0xa8005057, op: VFMSUB_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFMSUB.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xa8001057, op: VFMSUB_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFMUL.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0x90005057, op: VFMUL_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFMUL.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x90001057, op: VFMUL_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFMV.F.S vs2, fd + {mask: 0xfe0ff07f, value: 0x42001057, op: VFMV_F_S, args: argTypeList{arg_vs2, arg_fd}}, + // VFMV.S.F fs1, vd + {mask: 0xfff0707f, value: 0x42005057, op: VFMV_S_F, args: argTypeList{arg_fs1, arg_vd}}, + // VFMV.V.F fs1, vd + {mask: 0xfff0707f, value: 0x5e005057, op: VFMV_V_F, args: argTypeList{arg_fs1, arg_vd}}, + // VFNCVT.F.F.W vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x480a1057, op: VFNCVT_F_F_W, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFNCVT.F.XU.W vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x48091057, op: VFNCVT_F_XU_W, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFNCVT.F.X.W vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x48099057, op: VFNCVT_F_X_W, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFNCVT.ROD.F.F.W vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x480a9057, op: VFNCVT_ROD_F_F_W, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFNCVT.RTZ.XU.F.W vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x480b1057, op: VFNCVT_RTZ_XU_F_W, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFNCVT.RTZ.X.F.W vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x480b9057, op: 
VFNCVT_RTZ_X_F_W, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFNCVT.XU.F.W vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x48081057, op: VFNCVT_XU_F_W, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFNCVT.X.F.W vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x48089057, op: VFNCVT_X_F_W, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFNMACC.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0xb4005057, op: VFNMACC_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFNMACC.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xb4001057, op: VFNMACC_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFNMADD.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0xa4005057, op: VFNMADD_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFNMADD.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xa4001057, op: VFNMADD_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFNMSAC.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0xbc005057, op: VFNMSAC_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFNMSAC.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xbc001057, op: VFNMSAC_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFNMSUB.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0xac005057, op: VFNMSUB_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFNMSUB.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xac001057, op: VFNMSUB_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFRDIV.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0x84005057, op: VFRDIV_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFREC7.V vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x4c029057, op: VFREC7_V, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFREDMAX.VS vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x1c001057, op: VFREDMAX_VS, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFREDMIN.VS vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x14001057, op: VFREDMIN_VS, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFREDOSUM.VS vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x0c001057, op: VFREDOSUM_VS, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFREDUSUM.VS vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x04001057, op: VFREDUSUM_VS, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFRSQRT7.V vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x4c021057, op: VFRSQRT7_V, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFRSUB.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0x9c005057, op: VFRSUB_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFSGNJN.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0x24005057, op: VFSGNJN_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFSGNJN.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x24001057, op: VFSGNJN_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFSGNJX.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0x28005057, op: VFSGNJX_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFSGNJX.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x28001057, op: VFSGNJX_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFSGNJ.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0x20005057, op: VFSGNJ_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFSGNJ.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x20001057, op: VFSGNJ_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFSLIDE1DOWN.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0x3c005057, op: VFSLIDE1DOWN_VF, args: 
argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFSLIDE1UP.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0x38005057, op: VFSLIDE1UP_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFSQRT.V vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x4c001057, op: VFSQRT_V, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFSUB.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0x08005057, op: VFSUB_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFSUB.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x08001057, op: VFSUB_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFWADD.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0xc0005057, op: VFWADD_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFWADD.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xc0001057, op: VFWADD_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFWADD.WF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0xd0005057, op: VFWADD_WF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFWADD.WV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xd0001057, op: VFWADD_WV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFWCVT.F.F.V vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x48061057, op: VFWCVT_F_F_V, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFWCVT.F.XU.V vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x48051057, op: VFWCVT_F_XU_V, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFWCVT.F.X.V vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x48059057, op: VFWCVT_F_X_V, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFWCVT.RTZ.XU.F.V vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x48071057, op: VFWCVT_RTZ_XU_F_V, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFWCVT.RTZ.X.F.V vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x48079057, op: VFWCVT_RTZ_X_F_V, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFWCVT.XU.F.V vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x48041057, op: VFWCVT_XU_F_V, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFWCVT.X.F.V vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x48049057, op: VFWCVT_X_F_V, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VFWMACC.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0xf0005057, op: VFWMACC_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFWMACC.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xf0001057, op: VFWMACC_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFWMSAC.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0xf8005057, op: VFWMSAC_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFWMSAC.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xf8001057, op: VFWMSAC_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFWMUL.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0xe0005057, op: VFWMUL_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFWMUL.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xe0001057, op: VFWMUL_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFWNMACC.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0xf4005057, op: VFWNMACC_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFWNMACC.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xf4001057, op: VFWNMACC_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFWNMSAC.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0xfc005057, op: VFWNMSAC_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFWNMSAC.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xfc001057, op: VFWNMSAC_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + 
// VFWREDOSUM.VS vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xcc001057, op: VFWREDOSUM_VS, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFWREDUSUM.VS vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xc4001057, op: VFWREDUSUM_VS, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFWSUB.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0xc8005057, op: VFWSUB_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFWSUB.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xc8001057, op: VFWSUB_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VFWSUB.WF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0xd8005057, op: VFWSUB_WF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VFWSUB.WV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xd8001057, op: VFWSUB_WV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VID.V vm, vd + {mask: 0xfdfff07f, value: 0x5008a057, op: VID_V, args: argTypeList{arg_vm, arg_vd}}, + // VIOTA.M vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x50082057, op: VIOTA_M, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VL1RE16.V rs1_ptr, vd + {mask: 0xfff0707f, value: 0x02805007, op: VL1RE16_V, args: argTypeList{arg_rs1_ptr, arg_vd}}, + // VL1RE32.V rs1_ptr, vd + {mask: 0xfff0707f, value: 0x02806007, op: VL1RE32_V, args: argTypeList{arg_rs1_ptr, arg_vd}}, + // VL1RE64.V rs1_ptr, vd + {mask: 0xfff0707f, value: 0x02807007, op: VL1RE64_V, args: argTypeList{arg_rs1_ptr, arg_vd}}, + // VL1RE8.V rs1_ptr, vd + {mask: 0xfff0707f, value: 0x02800007, op: VL1RE8_V, args: argTypeList{arg_rs1_ptr, arg_vd}}, + // VL2RE16.V rs1_ptr, vd + {mask: 0xfff0707f, value: 0x22805007, op: VL2RE16_V, args: argTypeList{arg_rs1_ptr, arg_vd}}, + // VL2RE32.V rs1_ptr, vd + {mask: 0xfff0707f, value: 0x22806007, op: VL2RE32_V, args: argTypeList{arg_rs1_ptr, arg_vd}}, + // VL2RE64.V rs1_ptr, vd + {mask: 0xfff0707f, value: 0x22807007, op: VL2RE64_V, args: argTypeList{arg_rs1_ptr, arg_vd}}, + // VL2RE8.V rs1_ptr, vd + {mask: 0xfff0707f, value: 0x22800007, op: VL2RE8_V, args: argTypeList{arg_rs1_ptr, arg_vd}}, + // VL4RE16.V rs1_ptr, vd + {mask: 0xfff0707f, value: 0x62805007, op: VL4RE16_V, args: argTypeList{arg_rs1_ptr, arg_vd}}, + // VL4RE32.V rs1_ptr, vd + {mask: 0xfff0707f, value: 0x62806007, op: VL4RE32_V, args: argTypeList{arg_rs1_ptr, arg_vd}}, + // VL4RE64.V rs1_ptr, vd + {mask: 0xfff0707f, value: 0x62807007, op: VL4RE64_V, args: argTypeList{arg_rs1_ptr, arg_vd}}, + // VL4RE8.V rs1_ptr, vd + {mask: 0xfff0707f, value: 0x62800007, op: VL4RE8_V, args: argTypeList{arg_rs1_ptr, arg_vd}}, + // VL8RE16.V rs1_ptr, vd + {mask: 0xfff0707f, value: 0xe2805007, op: VL8RE16_V, args: argTypeList{arg_rs1_ptr, arg_vd}}, + // VL8RE32.V rs1_ptr, vd + {mask: 0xfff0707f, value: 0xe2806007, op: VL8RE32_V, args: argTypeList{arg_rs1_ptr, arg_vd}}, + // VL8RE64.V rs1_ptr, vd + {mask: 0xfff0707f, value: 0xe2807007, op: VL8RE64_V, args: argTypeList{arg_rs1_ptr, arg_vd}}, + // VL8RE8.V rs1_ptr, vd + {mask: 0xfff0707f, value: 0xe2800007, op: VL8RE8_V, args: argTypeList{arg_rs1_ptr, arg_vd}}, + // VLE16FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x01005007, op: VLE16FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLE16.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x00005007, op: VLE16_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLE32FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x01006007, op: VLE32FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLE32.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x00006007, op: VLE32_V, args: 
argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLE64FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x01007007, op: VLE64FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLE64.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x00007007, op: VLE64_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLE8FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x01000007, op: VLE8FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLE8.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x00000007, op: VLE8_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLM.V rs1_ptr, vd + {mask: 0xfff0707f, value: 0x02b00007, op: VLM_V, args: argTypeList{arg_rs1_ptr, arg_vd}}, + // VLOXEI16.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x0c005007, op: VLOXEI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXEI32.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x0c006007, op: VLOXEI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXEI64.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x0c007007, op: VLOXEI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXEI8.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x0c000007, op: VLOXEI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG2EI16.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x2c005007, op: VLOXSEG2EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG2EI32.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x2c006007, op: VLOXSEG2EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG2EI64.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x2c007007, op: VLOXSEG2EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG2EI8.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x2c000007, op: VLOXSEG2EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG3EI16.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x4c005007, op: VLOXSEG3EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG3EI32.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x4c006007, op: VLOXSEG3EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG3EI64.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x4c007007, op: VLOXSEG3EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG3EI8.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x4c000007, op: VLOXSEG3EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG4EI16.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x6c005007, op: VLOXSEG4EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG4EI32.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x6c006007, op: VLOXSEG4EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG4EI64.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x6c007007, op: VLOXSEG4EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG4EI8.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x6c000007, op: VLOXSEG4EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG5EI16.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x8c005007, op: VLOXSEG5EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG5EI32.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x8c006007, op: VLOXSEG5EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG5EI64.V vm, vs2, rs1_ptr, vd + 
{mask: 0xfc00707f, value: 0x8c007007, op: VLOXSEG5EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG5EI8.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x8c000007, op: VLOXSEG5EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG6EI16.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xac005007, op: VLOXSEG6EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG6EI32.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xac006007, op: VLOXSEG6EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG6EI64.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xac007007, op: VLOXSEG6EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG6EI8.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xac000007, op: VLOXSEG6EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG7EI16.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xcc005007, op: VLOXSEG7EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG7EI32.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xcc006007, op: VLOXSEG7EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG7EI64.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xcc007007, op: VLOXSEG7EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG7EI8.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xcc000007, op: VLOXSEG7EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG8EI16.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xec005007, op: VLOXSEG8EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG8EI32.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xec006007, op: VLOXSEG8EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG8EI64.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xec007007, op: VLOXSEG8EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLOXSEG8EI8.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xec000007, op: VLOXSEG8EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLSE16.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x08005007, op: VLSE16_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSE32.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x08006007, op: VLSE32_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSE64.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x08007007, op: VLSE64_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSE8.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x08000007, op: VLSE8_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSEG2E16FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x21005007, op: VLSEG2E16FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG2E16.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x20005007, op: VLSEG2E16_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG2E32FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x21006007, op: VLSEG2E32FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG2E32.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x20006007, op: VLSEG2E32_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG2E64FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x21007007, op: VLSEG2E64FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG2E64.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x20007007, op: 
VLSEG2E64_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG2E8FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x21000007, op: VLSEG2E8FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG2E8.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x20000007, op: VLSEG2E8_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG3E16FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x41005007, op: VLSEG3E16FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG3E16.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x40005007, op: VLSEG3E16_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG3E32FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x41006007, op: VLSEG3E32FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG3E32.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x40006007, op: VLSEG3E32_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG3E64FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x41007007, op: VLSEG3E64FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG3E64.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x40007007, op: VLSEG3E64_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG3E8FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x41000007, op: VLSEG3E8FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG3E8.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x40000007, op: VLSEG3E8_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG4E16FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x61005007, op: VLSEG4E16FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG4E16.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x60005007, op: VLSEG4E16_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG4E32FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x61006007, op: VLSEG4E32FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG4E32.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x60006007, op: VLSEG4E32_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG4E64FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x61007007, op: VLSEG4E64FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG4E64.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x60007007, op: VLSEG4E64_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG4E8FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x61000007, op: VLSEG4E8FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG4E8.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x60000007, op: VLSEG4E8_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG5E16FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x81005007, op: VLSEG5E16FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG5E16.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x80005007, op: VLSEG5E16_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG5E32FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x81006007, op: VLSEG5E32FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG5E32.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x80006007, op: VLSEG5E32_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG5E64FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x81007007, op: VLSEG5E64FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG5E64.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x80007007, op: VLSEG5E64_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG5E8FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x81000007, op: VLSEG5E8FF_V, args: argTypeList{arg_vm, 
arg_rs1_ptr, arg_vd}}, + // VLSEG5E8.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0x80000007, op: VLSEG5E8_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG6E16FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0xa1005007, op: VLSEG6E16FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG6E16.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0xa0005007, op: VLSEG6E16_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG6E32FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0xa1006007, op: VLSEG6E32FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG6E32.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0xa0006007, op: VLSEG6E32_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG6E64FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0xa1007007, op: VLSEG6E64FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG6E64.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0xa0007007, op: VLSEG6E64_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG6E8FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0xa1000007, op: VLSEG6E8FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG6E8.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0xa0000007, op: VLSEG6E8_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG7E16FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0xc1005007, op: VLSEG7E16FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG7E16.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0xc0005007, op: VLSEG7E16_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG7E32FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0xc1006007, op: VLSEG7E32FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG7E32.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0xc0006007, op: VLSEG7E32_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG7E64FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0xc1007007, op: VLSEG7E64FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG7E64.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0xc0007007, op: VLSEG7E64_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG7E8FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0xc1000007, op: VLSEG7E8FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG7E8.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0xc0000007, op: VLSEG7E8_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG8E16FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0xe1005007, op: VLSEG8E16FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG8E16.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0xe0005007, op: VLSEG8E16_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG8E32FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0xe1006007, op: VLSEG8E32FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG8E32.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0xe0006007, op: VLSEG8E32_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG8E64FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0xe1007007, op: VLSEG8E64FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG8E64.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0xe0007007, op: VLSEG8E64_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG8E8FF.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0xe1000007, op: VLSEG8E8FF_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSEG8E8.V vm, rs1_ptr, vd + {mask: 0xfdf0707f, value: 0xe0000007, op: VLSEG8E8_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vd}}, + // VLSSEG2E16.V vm, 
rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x28005007, op: VLSSEG2E16_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG2E32.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x28006007, op: VLSSEG2E32_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG2E64.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x28007007, op: VLSSEG2E64_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG2E8.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x28000007, op: VLSSEG2E8_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG3E16.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x48005007, op: VLSSEG3E16_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG3E32.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x48006007, op: VLSSEG3E32_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG3E64.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x48007007, op: VLSSEG3E64_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG3E8.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x48000007, op: VLSSEG3E8_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG4E16.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x68005007, op: VLSSEG4E16_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG4E32.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x68006007, op: VLSSEG4E32_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG4E64.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x68007007, op: VLSSEG4E64_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG4E8.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x68000007, op: VLSSEG4E8_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG5E16.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x88005007, op: VLSSEG5E16_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG5E32.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x88006007, op: VLSSEG5E32_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG5E64.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x88007007, op: VLSSEG5E64_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG5E8.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x88000007, op: VLSSEG5E8_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG6E16.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xa8005007, op: VLSSEG6E16_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG6E32.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xa8006007, op: VLSSEG6E32_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG6E64.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xa8007007, op: VLSSEG6E64_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG6E8.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xa8000007, op: VLSSEG6E8_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG7E16.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xc8005007, op: VLSSEG7E16_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG7E32.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xc8006007, op: VLSSEG7E32_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG7E64.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xc8007007, op: VLSSEG7E64_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG7E8.V vm, 
rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xc8000007, op: VLSSEG7E8_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG8E16.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xe8005007, op: VLSSEG8E16_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG8E32.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xe8006007, op: VLSSEG8E32_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG8E64.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xe8007007, op: VLSSEG8E64_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLSSEG8E8.V vm, rs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xe8000007, op: VLSSEG8E8_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vd}}, + // VLUXEI16.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x04005007, op: VLUXEI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXEI32.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x04006007, op: VLUXEI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXEI64.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x04007007, op: VLUXEI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXEI8.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x04000007, op: VLUXEI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG2EI16.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x24005007, op: VLUXSEG2EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG2EI32.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x24006007, op: VLUXSEG2EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG2EI64.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x24007007, op: VLUXSEG2EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG2EI8.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x24000007, op: VLUXSEG2EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG3EI16.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x44005007, op: VLUXSEG3EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG3EI32.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x44006007, op: VLUXSEG3EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG3EI64.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x44007007, op: VLUXSEG3EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG3EI8.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x44000007, op: VLUXSEG3EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG4EI16.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x64005007, op: VLUXSEG4EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG4EI32.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x64006007, op: VLUXSEG4EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG4EI64.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x64007007, op: VLUXSEG4EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG4EI8.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x64000007, op: VLUXSEG4EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG5EI16.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x84005007, op: VLUXSEG5EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG5EI32.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x84006007, op: VLUXSEG5EI32_V, args: argTypeList{arg_vm, arg_vs2, 
arg_rs1_ptr, arg_vd}}, + // VLUXSEG5EI64.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x84007007, op: VLUXSEG5EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG5EI8.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0x84000007, op: VLUXSEG5EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG6EI16.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xa4005007, op: VLUXSEG6EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG6EI32.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xa4006007, op: VLUXSEG6EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG6EI64.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xa4007007, op: VLUXSEG6EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG6EI8.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xa4000007, op: VLUXSEG6EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG7EI16.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xc4005007, op: VLUXSEG7EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG7EI32.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xc4006007, op: VLUXSEG7EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG7EI64.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xc4007007, op: VLUXSEG7EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG7EI8.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xc4000007, op: VLUXSEG7EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG8EI16.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xe4005007, op: VLUXSEG8EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG8EI32.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xe4006007, op: VLUXSEG8EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG8EI64.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xe4007007, op: VLUXSEG8EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VLUXSEG8EI8.V vm, vs2, rs1_ptr, vd + {mask: 0xfc00707f, value: 0xe4000007, op: VLUXSEG8EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vd}}, + // VMACC.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xb4002057, op: VMACC_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VMACC.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xb4006057, op: VMACC_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VMADC.VI vs2, simm5, vd + {mask: 0xfe00707f, value: 0x46003057, op: VMADC_VI, args: argTypeList{arg_vs2, arg_simm5, arg_vd}}, + // VMADC.VIM vs2, simm5, vd + {mask: 0xfe00707f, value: 0x44003057, op: VMADC_VIM, args: argTypeList{arg_vs2, arg_simm5, arg_vd}}, + // VMADC.VV vs2, vs1, vd + {mask: 0xfe00707f, value: 0x46000057, op: VMADC_VV, args: argTypeList{arg_vs2, arg_vs1, arg_vd}}, + // VMADC.VVM vs2, vs1, vd + {mask: 0xfe00707f, value: 0x44000057, op: VMADC_VVM, args: argTypeList{arg_vs2, arg_vs1, arg_vd}}, + // VMADC.VX vs2, rs1, vd + {mask: 0xfe00707f, value: 0x46004057, op: VMADC_VX, args: argTypeList{arg_vs2, arg_rs1, arg_vd}}, + // VMADC.VXM vs2, rs1, vd + {mask: 0xfe00707f, value: 0x44004057, op: VMADC_VXM, args: argTypeList{arg_vs2, arg_rs1, arg_vd}}, + // VMADD.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xa4002057, op: VMADD_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VMADD.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xa4006057, op: VMADD_VX, args: argTypeList{arg_vm, arg_vs2, 
arg_rs1, arg_vd}}, + // VMANDN.MM vs2, vs1, vd + {mask: 0xfe00707f, value: 0x62002057, op: VMANDN_MM, args: argTypeList{arg_vs2, arg_vs1, arg_vd}}, + // VMAND.MM vs2, vs1, vd + {mask: 0xfe00707f, value: 0x66002057, op: VMAND_MM, args: argTypeList{arg_vs2, arg_vs1, arg_vd}}, + // VMAXU.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x18000057, op: VMAXU_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VMAXU.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x18004057, op: VMAXU_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VMAX.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x1c000057, op: VMAX_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VMAX.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x1c004057, op: VMAX_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VMERGE.VIM vs2, simm5, vd + {mask: 0xfe00707f, value: 0x5c003057, op: VMERGE_VIM, args: argTypeList{arg_vs2, arg_simm5, arg_vd}}, + // VMERGE.VVM vs2, vs1, vd + {mask: 0xfe00707f, value: 0x5c000057, op: VMERGE_VVM, args: argTypeList{arg_vs2, arg_vs1, arg_vd}}, + // VMERGE.VXM vs2, rs1, vd + {mask: 0xfe00707f, value: 0x5c004057, op: VMERGE_VXM, args: argTypeList{arg_vs2, arg_rs1, arg_vd}}, + // VMFEQ.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0x60005057, op: VMFEQ_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VMFEQ.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x60001057, op: VMFEQ_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VMFGE.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0x7c005057, op: VMFGE_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VMFGT.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0x74005057, op: VMFGT_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VMFLE.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0x64005057, op: VMFLE_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VMFLE.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x64001057, op: VMFLE_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VMFLT.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0x6c005057, op: VMFLT_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VMFLT.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x6c001057, op: VMFLT_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VMFNE.VF vm, vs2, fs1, vd + {mask: 0xfc00707f, value: 0x70005057, op: VMFNE_VF, args: argTypeList{arg_vm, arg_vs2, arg_fs1, arg_vd}}, + // VMFNE.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x70001057, op: VMFNE_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VMINU.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x10000057, op: VMINU_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VMINU.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x10004057, op: VMINU_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VMIN.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x14000057, op: VMIN_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VMIN.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x14004057, op: VMIN_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VMNAND.MM vs2, vs1, vd + {mask: 0xfe00707f, value: 0x76002057, op: VMNAND_MM, args: argTypeList{arg_vs2, arg_vs1, arg_vd}}, + // VMNOR.MM vs2, vs1, vd + {mask: 0xfe00707f, value: 0x7a002057, op: VMNOR_MM, args: argTypeList{arg_vs2, arg_vs1, arg_vd}}, + // VMORN.MM vs2, vs1, vd + {mask: 0xfe00707f, value: 0x72002057, op: VMORN_MM, args: argTypeList{arg_vs2, arg_vs1, arg_vd}}, + // 
VMOR.MM vs2, vs1, vd + {mask: 0xfe00707f, value: 0x6a002057, op: VMOR_MM, args: argTypeList{arg_vs2, arg_vs1, arg_vd}}, + // VMSBC.VV vs2, vs1, vd + {mask: 0xfe00707f, value: 0x4e000057, op: VMSBC_VV, args: argTypeList{arg_vs2, arg_vs1, arg_vd}}, + // VMSBC.VVM vs2, vs1, vd + {mask: 0xfe00707f, value: 0x4c000057, op: VMSBC_VVM, args: argTypeList{arg_vs2, arg_vs1, arg_vd}}, + // VMSBC.VX vs2, rs1, vd + {mask: 0xfe00707f, value: 0x4e004057, op: VMSBC_VX, args: argTypeList{arg_vs2, arg_rs1, arg_vd}}, + // VMSBC.VXM vs2, rs1, vd + {mask: 0xfe00707f, value: 0x4c004057, op: VMSBC_VXM, args: argTypeList{arg_vs2, arg_rs1, arg_vd}}, + // VMSBF.M vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x5000a057, op: VMSBF_M, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VMSEQ.VI vm, vs2, simm5, vd + {mask: 0xfc00707f, value: 0x60003057, op: VMSEQ_VI, args: argTypeList{arg_vm, arg_vs2, arg_simm5, arg_vd}}, + // VMSEQ.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x60000057, op: VMSEQ_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VMSEQ.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x60004057, op: VMSEQ_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VMSGTU.VI vm, vs2, simm5, vd + {mask: 0xfc00707f, value: 0x78003057, op: VMSGTU_VI, args: argTypeList{arg_vm, arg_vs2, arg_simm5, arg_vd}}, + // VMSGTU.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x78004057, op: VMSGTU_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VMSGT.VI vm, vs2, simm5, vd + {mask: 0xfc00707f, value: 0x7c003057, op: VMSGT_VI, args: argTypeList{arg_vm, arg_vs2, arg_simm5, arg_vd}}, + // VMSGT.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x7c004057, op: VMSGT_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VMSIF.M vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x5001a057, op: VMSIF_M, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VMSLEU.VI vm, vs2, simm5, vd + {mask: 0xfc00707f, value: 0x70003057, op: VMSLEU_VI, args: argTypeList{arg_vm, arg_vs2, arg_simm5, arg_vd}}, + // VMSLEU.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x70000057, op: VMSLEU_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VMSLEU.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x70004057, op: VMSLEU_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VMSLE.VI vm, vs2, simm5, vd + {mask: 0xfc00707f, value: 0x74003057, op: VMSLE_VI, args: argTypeList{arg_vm, arg_vs2, arg_simm5, arg_vd}}, + // VMSLE.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x74000057, op: VMSLE_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VMSLE.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x74004057, op: VMSLE_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VMSLTU.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x68000057, op: VMSLTU_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VMSLTU.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x68004057, op: VMSLTU_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VMSLT.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x6c000057, op: VMSLT_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VMSLT.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x6c004057, op: VMSLT_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VMSNE.VI vm, vs2, simm5, vd + {mask: 0xfc00707f, value: 0x64003057, op: VMSNE_VI, args: argTypeList{arg_vm, arg_vs2, arg_simm5, arg_vd}}, + // VMSNE.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x64000057, op: VMSNE_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, 
arg_vd}}, + // VMSNE.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x64004057, op: VMSNE_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VMSOF.M vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x50012057, op: VMSOF_M, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VMULHSU.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x98002057, op: VMULHSU_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VMULHSU.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x98006057, op: VMULHSU_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VMULHU.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x90002057, op: VMULHU_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VMULHU.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x90006057, op: VMULHU_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VMULH.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x9c002057, op: VMULH_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VMULH.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x9c006057, op: VMULH_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VMUL.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x94002057, op: VMUL_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VMUL.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x94006057, op: VMUL_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VMV1R.V vs2, vd + {mask: 0xfe0ff07f, value: 0x9e003057, op: VMV1R_V, args: argTypeList{arg_vs2, arg_vd}}, + // VMV2R.V vs2, vd + {mask: 0xfe0ff07f, value: 0x9e00b057, op: VMV2R_V, args: argTypeList{arg_vs2, arg_vd}}, + // VMV4R.V vs2, vd + {mask: 0xfe0ff07f, value: 0x9e01b057, op: VMV4R_V, args: argTypeList{arg_vs2, arg_vd}}, + // VMV8R.V vs2, vd + {mask: 0xfe0ff07f, value: 0x9e03b057, op: VMV8R_V, args: argTypeList{arg_vs2, arg_vd}}, + // VMV.S.X rs1, vd + {mask: 0xfff0707f, value: 0x42006057, op: VMV_S_X, args: argTypeList{arg_rs1, arg_vd}}, + // VMV.V.I simm5, vd + {mask: 0xfff0707f, value: 0x5e003057, op: VMV_V_I, args: argTypeList{arg_simm5, arg_vd}}, + // VMV.V.V vs1, vd + {mask: 0xfff0707f, value: 0x5e000057, op: VMV_V_V, args: argTypeList{arg_vs1, arg_vd}}, + // VMV.V.X rs1, vd + {mask: 0xfff0707f, value: 0x5e004057, op: VMV_V_X, args: argTypeList{arg_rs1, arg_vd}}, + // VMV.X.S vs2, rd + {mask: 0xfe0ff07f, value: 0x42002057, op: VMV_X_S, args: argTypeList{arg_vs2, arg_rd}}, + // VMXNOR.MM vs2, vs1, vd + {mask: 0xfe00707f, value: 0x7e002057, op: VMXNOR_MM, args: argTypeList{arg_vs2, arg_vs1, arg_vd}}, + // VMXOR.MM vs2, vs1, vd + {mask: 0xfe00707f, value: 0x6e002057, op: VMXOR_MM, args: argTypeList{arg_vs2, arg_vs1, arg_vd}}, + // VNCLIPU.WI vm, vs2, zimm5, vd + {mask: 0xfc00707f, value: 0xb8003057, op: VNCLIPU_WI, args: argTypeList{arg_vm, arg_vs2, arg_zimm5, arg_vd}}, + // VNCLIPU.WV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xb8000057, op: VNCLIPU_WV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VNCLIPU.WX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xb8004057, op: VNCLIPU_WX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VNCLIP.WI vm, vs2, zimm5, vd + {mask: 0xfc00707f, value: 0xbc003057, op: VNCLIP_WI, args: argTypeList{arg_vm, arg_vs2, arg_zimm5, arg_vd}}, + // VNCLIP.WV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xbc000057, op: VNCLIP_WV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VNCLIP.WX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xbc004057, op: VNCLIP_WX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VNMSAC.VV vm, vs2, vs1, vd + {mask: 
0xfc00707f, value: 0xbc002057, op: VNMSAC_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VNMSAC.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xbc006057, op: VNMSAC_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VNMSUB.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xac002057, op: VNMSUB_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VNMSUB.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xac006057, op: VNMSUB_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VNSRA.WI vm, vs2, zimm5, vd + {mask: 0xfc00707f, value: 0xb4003057, op: VNSRA_WI, args: argTypeList{arg_vm, arg_vs2, arg_zimm5, arg_vd}}, + // VNSRA.WV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xb4000057, op: VNSRA_WV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VNSRA.WX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xb4004057, op: VNSRA_WX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VNSRL.WI vm, vs2, zimm5, vd + {mask: 0xfc00707f, value: 0xb0003057, op: VNSRL_WI, args: argTypeList{arg_vm, arg_vs2, arg_zimm5, arg_vd}}, + // VNSRL.WV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xb0000057, op: VNSRL_WV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VNSRL.WX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xb0004057, op: VNSRL_WX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VOR.VI vm, vs2, simm5, vd + {mask: 0xfc00707f, value: 0x28003057, op: VOR_VI, args: argTypeList{arg_vm, arg_vs2, arg_simm5, arg_vd}}, + // VOR.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x28000057, op: VOR_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VOR.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x28004057, op: VOR_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VREDAND.VS vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x04002057, op: VREDAND_VS, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VREDMAXU.VS vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x18002057, op: VREDMAXU_VS, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VREDMAX.VS vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x1c002057, op: VREDMAX_VS, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VREDMINU.VS vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x10002057, op: VREDMINU_VS, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VREDMIN.VS vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x14002057, op: VREDMIN_VS, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VREDOR.VS vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x08002057, op: VREDOR_VS, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VREDSUM.VS vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x00002057, op: VREDSUM_VS, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VREDXOR.VS vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x0c002057, op: VREDXOR_VS, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VREMU.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x88002057, op: VREMU_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VREMU.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x88006057, op: VREMU_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VREM.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x8c002057, op: VREM_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VREM.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x8c006057, op: VREM_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VRGATHEREI16.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x38000057, op: 
VRGATHEREI16_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VRGATHER.VI vm, vs2, zimm5, vd + {mask: 0xfc00707f, value: 0x30003057, op: VRGATHER_VI, args: argTypeList{arg_vm, arg_vs2, arg_zimm5, arg_vd}}, + // VRGATHER.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x30000057, op: VRGATHER_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VRGATHER.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x30004057, op: VRGATHER_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VRSUB.VI vm, vs2, simm5, vd + {mask: 0xfc00707f, value: 0x0c003057, op: VRSUB_VI, args: argTypeList{arg_vm, arg_vs2, arg_simm5, arg_vd}}, + // VRSUB.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x0c004057, op: VRSUB_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VS1R.V rs1_ptr, vs3 + {mask: 0xfff0707f, value: 0x02800027, op: VS1R_V, args: argTypeList{arg_rs1_ptr, arg_vs3}}, + // VS2R.V rs1_ptr, vs3 + {mask: 0xfff0707f, value: 0x22800027, op: VS2R_V, args: argTypeList{arg_rs1_ptr, arg_vs3}}, + // VS4R.V rs1_ptr, vs3 + {mask: 0xfff0707f, value: 0x62800027, op: VS4R_V, args: argTypeList{arg_rs1_ptr, arg_vs3}}, + // VS8R.V rs1_ptr, vs3 + {mask: 0xfff0707f, value: 0xe2800027, op: VS8R_V, args: argTypeList{arg_rs1_ptr, arg_vs3}}, + // VSADDU.VI vm, vs2, simm5, vd + {mask: 0xfc00707f, value: 0x80003057, op: VSADDU_VI, args: argTypeList{arg_vm, arg_vs2, arg_simm5, arg_vd}}, + // VSADDU.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x80000057, op: VSADDU_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VSADDU.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x80004057, op: VSADDU_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VSADD.VI vm, vs2, simm5, vd + {mask: 0xfc00707f, value: 0x84003057, op: VSADD_VI, args: argTypeList{arg_vm, arg_vs2, arg_simm5, arg_vd}}, + // VSADD.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x84000057, op: VSADD_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VSADD.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x84004057, op: VSADD_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VSBC.VVM vs2, vs1, vd + {mask: 0xfe00707f, value: 0x48000057, op: VSBC_VVM, args: argTypeList{arg_vs2, arg_vs1, arg_vd}}, + // VSBC.VXM vs2, rs1, vd + {mask: 0xfe00707f, value: 0x48004057, op: VSBC_VXM, args: argTypeList{arg_vs2, arg_rs1, arg_vd}}, + // VSE16.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0x00005027, op: VSE16_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSE32.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0x00006027, op: VSE32_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSE64.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0x00007027, op: VSE64_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSE8.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0x00000027, op: VSE8_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSETIVLI vtype_zimm10, zimm, rd + {mask: 0xc000707f, value: 0xc0007057, op: VSETIVLI, args: argTypeList{arg_vtype_zimm10, arg_zimm, arg_rd}}, + // VSETVL rs2, rs1, rd + {mask: 0xfe00707f, value: 0x80007057, op: VSETVL, args: argTypeList{arg_rs2, arg_rs1, arg_rd}}, + // VSETVLI vtype_zimm11, rs1, rd + {mask: 0x8000707f, value: 0x00007057, op: VSETVLI, args: argTypeList{arg_vtype_zimm11, arg_rs1, arg_rd}}, + // VSEXT.VF2 vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x4803a057, op: VSEXT_VF2, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VSEXT.VF4 vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x4802a057, op: VSEXT_VF4, args: argTypeList{arg_vm, 
arg_vs2, arg_vd}}, + // VSEXT.VF8 vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x4801a057, op: VSEXT_VF8, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VSLIDE1DOWN.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x3c006057, op: VSLIDE1DOWN_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VSLIDE1UP.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x38006057, op: VSLIDE1UP_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VSLIDEDOWN.VI vm, vs2, zimm5, vd + {mask: 0xfc00707f, value: 0x3c003057, op: VSLIDEDOWN_VI, args: argTypeList{arg_vm, arg_vs2, arg_zimm5, arg_vd}}, + // VSLIDEDOWN.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x3c004057, op: VSLIDEDOWN_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VSLIDEUP.VI vm, vs2, zimm5, vd + {mask: 0xfc00707f, value: 0x38003057, op: VSLIDEUP_VI, args: argTypeList{arg_vm, arg_vs2, arg_zimm5, arg_vd}}, + // VSLIDEUP.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x38004057, op: VSLIDEUP_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VSLL.VI vm, vs2, zimm5, vd + {mask: 0xfc00707f, value: 0x94003057, op: VSLL_VI, args: argTypeList{arg_vm, arg_vs2, arg_zimm5, arg_vd}}, + // VSLL.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x94000057, op: VSLL_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VSLL.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x94004057, op: VSLL_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VSMUL.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x9c000057, op: VSMUL_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VSMUL.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x9c004057, op: VSMUL_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VSM.V rs1_ptr, vs3 + {mask: 0xfff0707f, value: 0x02b00027, op: VSM_V, args: argTypeList{arg_rs1_ptr, arg_vs3}}, + // VSOXEI16.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x0c005027, op: VSOXEI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXEI32.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x0c006027, op: VSOXEI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXEI64.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x0c007027, op: VSOXEI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXEI8.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x0c000027, op: VSOXEI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG2EI16.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x2c005027, op: VSOXSEG2EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG2EI32.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x2c006027, op: VSOXSEG2EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG2EI64.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x2c007027, op: VSOXSEG2EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG2EI8.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x2c000027, op: VSOXSEG2EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG3EI16.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x4c005027, op: VSOXSEG3EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG3EI32.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x4c006027, op: VSOXSEG3EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG3EI64.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x4c007027, op: VSOXSEG3EI64_V, args: argTypeList{arg_vm, 
arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG3EI8.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x4c000027, op: VSOXSEG3EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG4EI16.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x6c005027, op: VSOXSEG4EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG4EI32.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x6c006027, op: VSOXSEG4EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG4EI64.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x6c007027, op: VSOXSEG4EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG4EI8.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x6c000027, op: VSOXSEG4EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG5EI16.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x8c005027, op: VSOXSEG5EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG5EI32.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x8c006027, op: VSOXSEG5EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG5EI64.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x8c007027, op: VSOXSEG5EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG5EI8.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x8c000027, op: VSOXSEG5EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG6EI16.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xac005027, op: VSOXSEG6EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG6EI32.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xac006027, op: VSOXSEG6EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG6EI64.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xac007027, op: VSOXSEG6EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG6EI8.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xac000027, op: VSOXSEG6EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG7EI16.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xcc005027, op: VSOXSEG7EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG7EI32.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xcc006027, op: VSOXSEG7EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG7EI64.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xcc007027, op: VSOXSEG7EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG7EI8.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xcc000027, op: VSOXSEG7EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG8EI16.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xec005027, op: VSOXSEG8EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG8EI32.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xec006027, op: VSOXSEG8EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG8EI64.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xec007027, op: VSOXSEG8EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSOXSEG8EI8.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xec000027, op: VSOXSEG8EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSRA.VI vm, vs2, zimm5, vd + {mask: 0xfc00707f, value: 0xa4003057, op: VSRA_VI, args: argTypeList{arg_vm, arg_vs2, arg_zimm5, arg_vd}}, + // 
VSRA.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xa4000057, op: VSRA_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VSRA.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xa4004057, op: VSRA_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VSRL.VI vm, vs2, zimm5, vd + {mask: 0xfc00707f, value: 0xa0003057, op: VSRL_VI, args: argTypeList{arg_vm, arg_vs2, arg_zimm5, arg_vd}}, + // VSRL.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xa0000057, op: VSRL_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VSRL.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xa0004057, op: VSRL_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VSSE16.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x08005027, op: VSSE16_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSE32.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x08006027, op: VSSE32_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSE64.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x08007027, op: VSSE64_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSE8.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x08000027, op: VSSE8_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSEG2E16.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0x20005027, op: VSSEG2E16_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG2E32.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0x20006027, op: VSSEG2E32_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG2E64.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0x20007027, op: VSSEG2E64_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG2E8.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0x20000027, op: VSSEG2E8_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG3E16.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0x40005027, op: VSSEG3E16_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG3E32.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0x40006027, op: VSSEG3E32_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG3E64.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0x40007027, op: VSSEG3E64_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG3E8.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0x40000027, op: VSSEG3E8_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG4E16.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0x60005027, op: VSSEG4E16_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG4E32.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0x60006027, op: VSSEG4E32_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG4E64.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0x60007027, op: VSSEG4E64_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG4E8.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0x60000027, op: VSSEG4E8_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG5E16.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0x80005027, op: VSSEG5E16_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG5E32.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0x80006027, op: VSSEG5E32_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG5E64.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0x80007027, op: VSSEG5E64_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG5E8.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0x80000027, op: VSSEG5E8_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG6E16.V vm, 
rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0xa0005027, op: VSSEG6E16_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG6E32.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0xa0006027, op: VSSEG6E32_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG6E64.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0xa0007027, op: VSSEG6E64_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG6E8.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0xa0000027, op: VSSEG6E8_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG7E16.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0xc0005027, op: VSSEG7E16_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG7E32.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0xc0006027, op: VSSEG7E32_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG7E64.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0xc0007027, op: VSSEG7E64_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG7E8.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0xc0000027, op: VSSEG7E8_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG8E16.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0xe0005027, op: VSSEG8E16_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG8E32.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0xe0006027, op: VSSEG8E32_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG8E64.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0xe0007027, op: VSSEG8E64_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSEG8E8.V vm, rs1_ptr, vs3 + {mask: 0xfdf0707f, value: 0xe0000027, op: VSSEG8E8_V, args: argTypeList{arg_vm, arg_rs1_ptr, arg_vs3}}, + // VSSRA.VI vm, vs2, zimm5, vd + {mask: 0xfc00707f, value: 0xac003057, op: VSSRA_VI, args: argTypeList{arg_vm, arg_vs2, arg_zimm5, arg_vd}}, + // VSSRA.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xac000057, op: VSSRA_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VSSRA.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xac004057, op: VSSRA_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VSSRL.VI vm, vs2, zimm5, vd + {mask: 0xfc00707f, value: 0xa8003057, op: VSSRL_VI, args: argTypeList{arg_vm, arg_vs2, arg_zimm5, arg_vd}}, + // VSSRL.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xa8000057, op: VSSRL_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VSSRL.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xa8004057, op: VSSRL_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VSSSEG2E16.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x28005027, op: VSSSEG2E16_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG2E32.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x28006027, op: VSSSEG2E32_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG2E64.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x28007027, op: VSSSEG2E64_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG2E8.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x28000027, op: VSSSEG2E8_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG3E16.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x48005027, op: VSSSEG3E16_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG3E32.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x48006027, op: VSSSEG3E32_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG3E64.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x48007027, op: VSSSEG3E64_V, args: 
argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG3E8.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x48000027, op: VSSSEG3E8_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG4E16.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x68005027, op: VSSSEG4E16_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG4E32.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x68006027, op: VSSSEG4E32_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG4E64.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x68007027, op: VSSSEG4E64_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG4E8.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x68000027, op: VSSSEG4E8_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG5E16.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x88005027, op: VSSSEG5E16_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG5E32.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x88006027, op: VSSSEG5E32_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG5E64.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x88007027, op: VSSSEG5E64_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG5E8.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x88000027, op: VSSSEG5E8_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG6E16.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xa8005027, op: VSSSEG6E16_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG6E32.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xa8006027, op: VSSSEG6E32_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG6E64.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xa8007027, op: VSSSEG6E64_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG6E8.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xa8000027, op: VSSSEG6E8_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG7E16.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xc8005027, op: VSSSEG7E16_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG7E32.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xc8006027, op: VSSSEG7E32_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG7E64.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xc8007027, op: VSSSEG7E64_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG7E8.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xc8000027, op: VSSSEG7E8_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG8E16.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xe8005027, op: VSSSEG8E16_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG8E32.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xe8006027, op: VSSSEG8E32_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG8E64.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xe8007027, op: VSSSEG8E64_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSSEG8E8.V vm, rs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xe8000027, op: VSSSEG8E8_V, args: argTypeList{arg_vm, arg_rs2, arg_rs1_ptr, arg_vs3}}, + // VSSUBU.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x88000057, op: VSSUBU_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VSSUBU.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x88004057, op: 
VSSUBU_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VSSUB.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x8c000057, op: VSSUB_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VSSUB.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x8c004057, op: VSSUB_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VSUB.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x08000057, op: VSUB_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VSUB.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x08004057, op: VSUB_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VSUXEI16.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x04005027, op: VSUXEI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXEI32.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x04006027, op: VSUXEI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXEI64.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x04007027, op: VSUXEI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXEI8.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x04000027, op: VSUXEI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG2EI16.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x24005027, op: VSUXSEG2EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG2EI32.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x24006027, op: VSUXSEG2EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG2EI64.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x24007027, op: VSUXSEG2EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG2EI8.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x24000027, op: VSUXSEG2EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG3EI16.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x44005027, op: VSUXSEG3EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG3EI32.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x44006027, op: VSUXSEG3EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG3EI64.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x44007027, op: VSUXSEG3EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG3EI8.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x44000027, op: VSUXSEG3EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG4EI16.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x64005027, op: VSUXSEG4EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG4EI32.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x64006027, op: VSUXSEG4EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG4EI64.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x64007027, op: VSUXSEG4EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG4EI8.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x64000027, op: VSUXSEG4EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG5EI16.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x84005027, op: VSUXSEG5EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG5EI32.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x84006027, op: VSUXSEG5EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG5EI64.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 
0x84007027, op: VSUXSEG5EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG5EI8.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0x84000027, op: VSUXSEG5EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG6EI16.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xa4005027, op: VSUXSEG6EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG6EI32.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xa4006027, op: VSUXSEG6EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG6EI64.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xa4007027, op: VSUXSEG6EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG6EI8.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xa4000027, op: VSUXSEG6EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG7EI16.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xc4005027, op: VSUXSEG7EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG7EI32.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xc4006027, op: VSUXSEG7EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG7EI64.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xc4007027, op: VSUXSEG7EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG7EI8.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xc4000027, op: VSUXSEG7EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG8EI16.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xe4005027, op: VSUXSEG8EI16_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG8EI32.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xe4006027, op: VSUXSEG8EI32_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG8EI64.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xe4007027, op: VSUXSEG8EI64_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VSUXSEG8EI8.V vm, vs2, rs1_ptr, vs3 + {mask: 0xfc00707f, value: 0xe4000027, op: VSUXSEG8EI8_V, args: argTypeList{arg_vm, arg_vs2, arg_rs1_ptr, arg_vs3}}, + // VWADDU.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xc0002057, op: VWADDU_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VWADDU.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xc0006057, op: VWADDU_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VWADDU.WV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xd0002057, op: VWADDU_WV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VWADDU.WX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xd0006057, op: VWADDU_WX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VWADD.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xc4002057, op: VWADD_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VWADD.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xc4006057, op: VWADD_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VWADD.WV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xd4002057, op: VWADD_WV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VWADD.WX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xd4006057, op: VWADD_WX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VWMACCSU.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xfc002057, op: VWMACCSU_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VWMACCSU.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xfc006057, op: VWMACCSU_VX, args: 
argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VWMACCUS.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xf8006057, op: VWMACCUS_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VWMACCU.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xf0002057, op: VWMACCU_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VWMACCU.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xf0006057, op: VWMACCU_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VWMACC.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xf4002057, op: VWMACC_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VWMACC.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xf4006057, op: VWMACC_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VWMULSU.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xe8002057, op: VWMULSU_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VWMULSU.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xe8006057, op: VWMULSU_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VWMULU.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xe0002057, op: VWMULU_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VWMULU.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xe0006057, op: VWMULU_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VWMUL.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xec002057, op: VWMUL_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VWMUL.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xec006057, op: VWMUL_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VWREDSUMU.VS vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xc0000057, op: VWREDSUMU_VS, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VWREDSUM.VS vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xc4000057, op: VWREDSUM_VS, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VWSUBU.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xc8002057, op: VWSUBU_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VWSUBU.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xc8006057, op: VWSUBU_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VWSUBU.WV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xd8002057, op: VWSUBU_WV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VWSUBU.WX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xd8006057, op: VWSUBU_WX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VWSUB.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xcc002057, op: VWSUB_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VWSUB.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xcc006057, op: VWSUB_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VWSUB.WV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0xdc002057, op: VWSUB_WV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VWSUB.WX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0xdc006057, op: VWSUB_WX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VXOR.VI vm, vs2, simm5, vd + {mask: 0xfc00707f, value: 0x2c003057, op: VXOR_VI, args: argTypeList{arg_vm, arg_vs2, arg_simm5, arg_vd}}, + // VXOR.VV vm, vs2, vs1, vd + {mask: 0xfc00707f, value: 0x2c000057, op: VXOR_VV, args: argTypeList{arg_vm, arg_vs2, arg_vs1, arg_vd}}, + // VXOR.VX vm, vs2, rs1, vd + {mask: 0xfc00707f, value: 0x2c004057, op: VXOR_VX, args: argTypeList{arg_vm, arg_vs2, arg_rs1, arg_vd}}, + // VZEXT.VF2 vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x48032057, op: VZEXT_VF2, args: argTypeList{arg_vm, arg_vs2, 
arg_vd}}, + // VZEXT.VF4 vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x48022057, op: VZEXT_VF4, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, + // VZEXT.VF8 vm, vs2, vd + {mask: 0xfc0ff07f, value: 0x48012057, op: VZEXT_VF8, args: argTypeList{arg_vm, arg_vs2, arg_vd}}, // XNOR rd, rs1, rs2 {mask: 0xfe00707f, value: 0x40004033, op: XNOR, args: argTypeList{arg_rd, arg_rs1, arg_rs2}}, // XOR rd, rs1, rs2 diff --git a/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/vector.go b/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/vector.go new file mode 100644 index 00000000000..ee370c795b2 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/arch/riscv64/riscv64asm/vector.go @@ -0,0 +1,142 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package riscv64asm + +// This file contains some utility functions that can be used to decode +// vector instructions into both gnu and plan9 assembly. + +func implicitMask(instOp Op) bool { + switch instOp { + case VADC_VIM, VADC_VVM, VADC_VXM, VFMERGE_VFM, VMADC_VIM, VMADC_VVM, + VMADC_VXM, VMERGE_VIM, VMERGE_VVM, VMERGE_VXM, VMSBC_VVM, VMSBC_VXM, + VSBC_VVM, VSBC_VXM: + return true + + default: + return false + } +} + +func imaOrFma(instOp Op) bool { + switch instOp { + case VFMACC_VF, VFMACC_VV, VFMADD_VF, VFMADD_VV, VFMSAC_VF, VFMSAC_VV, + VFMSUB_VF, VFMSUB_VV, VFNMACC_VF, VFNMACC_VV, VFNMADD_VF, VFNMADD_VV, + VFNMSAC_VF, VFNMSAC_VV, VFNMSUB_VF, VFNMSUB_VV, VFWMACC_VF, VFWMACC_VV, + VFWMSAC_VF, VFWMSAC_VV, VFWNMACC_VF, VFWNMACC_VV, VFWNMSAC_VF, + VFWNMSAC_VV, VMACC_VV, VMACC_VX, VMADD_VV, VMADD_VX, VNMSAC_VV, + VNMSAC_VX, VNMSUB_VV, VNMSUB_VX, VWMACCSU_VV, VWMACCSU_VX, VWMACCUS_VX, + VWMACCU_VV, VWMACCU_VX, VWMACC_VV, VWMACC_VX: + return true + + default: + return false + } +} + +func pseudoRVVLoad(instOp Op) string { + switch instOp { + case VL1RE8_V: + return "VL1R.V" + + case VL2RE8_V: + return "VL2R.V" + + case VL4RE8_V: + return "VL4R.V" + + case VL8RE8_V: + return "VL8R.V" + } + + return "" +} + +func pseudoRVVArith(instOp Op, rawArgs []Arg, args []string) (string, []string) { + var op string + + switch instOp { + case VRSUB_VX: + if v, ok := rawArgs[1].(Reg); ok && v == X0 { + op = "VNEG.V" + args = append(args[:1], args[2:]...) + } + + case VWADD_VX: + if v, ok := rawArgs[1].(Reg); ok && v == X0 { + op = "VWCVT.X.X.V" + args = append(args[:1], args[2:]...) + } + + case VWADDU_VX: + if v, ok := rawArgs[1].(Reg); ok && v == X0 { + op = "VWCVTU.X.X.V" + args = append(args[:1], args[2:]...) + } + + case VXOR_VI: + if v, ok := rawArgs[1].(Simm); ok && v.Imm == -1 { + op = "VNOT.V" + args = append(args[:1], args[2:]...) + } + + case VNSRL_WX: + if v, ok := rawArgs[1].(Reg); ok && v == X0 { + op = "VNCVT.X.X.W" + args = append(args[:1], args[2:]...) 
+ } + + case VFSGNJN_VV: + vs2, ok1 := rawArgs[0].(Reg) + vs1, ok2 := rawArgs[1].(Reg) + if ok1 && ok2 && vs1 == vs2 { + op = "VFNEG.V" + args = args[1:] + } + + case VFSGNJX_VV: + vs2, ok1 := rawArgs[0].(Reg) + vs1, ok2 := rawArgs[1].(Reg) + if ok1 && ok2 && vs1 == vs2 { + op = "VFABS.V" + args = args[1:] + } + + case VMAND_MM: + vs2, ok1 := rawArgs[0].(Reg) + vs1, ok2 := rawArgs[1].(Reg) + if ok1 && ok2 && vs1 == vs2 { + op = "VMMV.M" + args = args[1:] + } + + case VMXOR_MM: + vs2, ok1 := rawArgs[0].(Reg) + vs1, ok2 := rawArgs[1].(Reg) + vd, ok3 := rawArgs[2].(Reg) + if ok1 && ok2 && ok3 && vs1 == vs2 && vd == vs1 { + op = "VMCLR.M" + args = args[2:] + } + + case VMXNOR_MM: + vs2, ok1 := rawArgs[0].(Reg) + vs1, ok2 := rawArgs[1].(Reg) + vd, ok3 := rawArgs[2].(Reg) + if ok1 && ok2 && ok3 && vs1 == vs2 && vd == vs1 { + op = "VMSET.M" + args = args[2:] + } + + case VMNAND_MM: + vs2, ok1 := rawArgs[0].(Reg) + vs1, ok2 := rawArgs[1].(Reg) + if ok1 && ok2 && vs1 == vs2 { + op = "VMNOT.M" + args = args[1:] + } + } + + return op, args +} diff --git a/src/cmd/vendor/golang.org/x/sys/unix/affinity_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/affinity_linux.go index 3c7a6d6e2f1..3ea470387bc 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/affinity_linux.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/affinity_linux.go @@ -41,6 +41,15 @@ func (s *CPUSet) Zero() { clear(s[:]) } +// Fill adds all possible CPU bits to the set s. On Linux, [SchedSetaffinity] +// will silently ignore any invalid CPU bits in [CPUSet] so this is an +// efficient way of resetting the CPU affinity of a process. +func (s *CPUSet) Fill() { + for i := range s { + s[i] = ^cpuMask(0) + } +} + func cpuBitsIndex(cpu int) int { return cpu / _NCPUBITS } diff --git a/src/cmd/vendor/golang.org/x/sys/unix/fdset.go b/src/cmd/vendor/golang.org/x/sys/unix/fdset.go index 9e83d18cd04..62ed12645f4 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/fdset.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/fdset.go @@ -23,7 +23,5 @@ func (fds *FdSet) IsSet(fd int) bool { // Zero clears the set fds. func (fds *FdSet) Zero() { - for i := range fds.Bits { - fds.Bits[i] = 0 - } + clear(fds.Bits[:]) } diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ifreq_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/ifreq_linux.go index 848840ae4c7..309f5a2b0c7 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/ifreq_linux.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/ifreq_linux.go @@ -111,9 +111,7 @@ func (ifr *Ifreq) SetUint32(v uint32) { // clear zeroes the ifreq's union field to prevent trailing garbage data from // being sent to the kernel if an ifreq is reused. func (ifr *Ifreq) clear() { - for i := range ifr.raw.Ifru { - ifr.raw.Ifru[i] = 0 - } + clear(ifr.raw.Ifru[:]) } // TODO(mdlayher): export as IfreqData? For now we can provide helpers such as diff --git a/src/cmd/vendor/golang.org/x/sys/unix/mkall.sh b/src/cmd/vendor/golang.org/x/sys/unix/mkall.sh index e6f31d374df..d0ed6119129 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/mkall.sh +++ b/src/cmd/vendor/golang.org/x/sys/unix/mkall.sh @@ -49,6 +49,7 @@ esac if [[ "$GOOS" = "linux" ]]; then # Use the Docker-based build system # Files generated through docker (use $cmd so you can Ctl-C the build or run) + set -e $cmd docker build --tag generate:$GOOS $GOOS $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." 
&& pwd):/build generate:$GOOS exit diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go index 4958a657085..9439af961d9 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -801,9 +801,7 @@ func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) { // one. The kernel expects SID to be in network byte order. binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID) copy(sa.raw[8:14], sa.Remote) - for i := 14; i < 14+IFNAMSIZ; i++ { - sa.raw[i] = 0 - } + clear(sa.raw[14 : 14+IFNAMSIZ]) copy(sa.raw[14:], sa.Dev) return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil } diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 88162099af5..34a46769730 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -248,6 +248,23 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { return Statvfs1(path, buf, ST_WAIT) } +func Getvfsstat(buf []Statvfs_t, flags int) (n int, err error) { + var ( + _p0 unsafe.Pointer + bufsize uintptr + ) + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statvfs_t{}) * uintptr(len(buf)) + } + r0, _, e1 := Syscall(SYS_GETVFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + /* * Exposed directly */ diff --git a/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go b/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go index 640f6b153f0..bd513373060 100644 --- a/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -321,6 +321,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW +//sys GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) = kernel32.GetNumberOfConsoleInputEvents +//sys FlushConsoleInputBuffer(console Handle) (err error) = kernel32.FlushConsoleInputBuffer //sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot //sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW diff --git a/src/cmd/vendor/golang.org/x/sys/windows/types_windows.go b/src/cmd/vendor/golang.org/x/sys/windows/types_windows.go index 993a2297dbe..358be3c7f5e 100644 --- a/src/cmd/vendor/golang.org/x/sys/windows/types_windows.go +++ b/src/cmd/vendor/golang.org/x/sys/windows/types_windows.go @@ -65,6 +65,22 @@ var signals = [...]string{ 15: "terminated", } +// File flags for [os.OpenFile]. The O_ prefix is used to indicate +// that these flags are specific to the OpenFile function. 
+const ( + O_FILE_FLAG_OPEN_NO_RECALL = FILE_FLAG_OPEN_NO_RECALL + O_FILE_FLAG_OPEN_REPARSE_POINT = FILE_FLAG_OPEN_REPARSE_POINT + O_FILE_FLAG_SESSION_AWARE = FILE_FLAG_SESSION_AWARE + O_FILE_FLAG_POSIX_SEMANTICS = FILE_FLAG_POSIX_SEMANTICS + O_FILE_FLAG_BACKUP_SEMANTICS = FILE_FLAG_BACKUP_SEMANTICS + O_FILE_FLAG_DELETE_ON_CLOSE = FILE_FLAG_DELETE_ON_CLOSE + O_FILE_FLAG_SEQUENTIAL_SCAN = FILE_FLAG_SEQUENTIAL_SCAN + O_FILE_FLAG_RANDOM_ACCESS = FILE_FLAG_RANDOM_ACCESS + O_FILE_FLAG_NO_BUFFERING = FILE_FLAG_NO_BUFFERING + O_FILE_FLAG_OVERLAPPED = FILE_FLAG_OVERLAPPED + O_FILE_FLAG_WRITE_THROUGH = FILE_FLAG_WRITE_THROUGH +) + const ( FILE_READ_DATA = 0x00000001 FILE_READ_ATTRIBUTES = 0x00000080 diff --git a/src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 641a5f4b775..426151a0193 100644 --- a/src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -238,6 +238,7 @@ var ( procFindResourceW = modkernel32.NewProc("FindResourceW") procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose") + procFlushConsoleInputBuffer = modkernel32.NewProc("FlushConsoleInputBuffer") procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") procFormatMessageW = modkernel32.NewProc("FormatMessageW") @@ -284,6 +285,7 @@ var ( procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId") + procGetNumberOfConsoleInputEvents = modkernel32.NewProc("GetNumberOfConsoleInputEvents") procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") procGetProcAddress = modkernel32.NewProc("GetProcAddress") @@ -2111,6 +2113,14 @@ func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { return } +func FlushConsoleInputBuffer(console Handle) (err error) { + r1, _, e1 := syscall.SyscallN(procFlushConsoleInputBuffer.Addr(), uintptr(console)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func FlushFileBuffers(handle Handle) (err error) { r1, _, e1 := syscall.SyscallN(procFlushFileBuffers.Addr(), uintptr(handle)) if r1 == 0 { @@ -2481,6 +2491,14 @@ func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err erro return } +func GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetNumberOfConsoleInputEvents.Addr(), uintptr(console), uintptr(unsafe.Pointer(numevents))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { var _p0 uint32 if wait { diff --git a/src/cmd/vendor/golang.org/x/telemetry/internal/crashmonitor/monitor.go b/src/cmd/vendor/golang.org/x/telemetry/internal/crashmonitor/monitor.go index 53966ad2bcb..ac22f68c34c 100644 --- a/src/cmd/vendor/golang.org/x/telemetry/internal/crashmonitor/monitor.go +++ b/src/cmd/vendor/golang.org/x/telemetry/internal/crashmonitor/monitor.go @@ -326,11 +326,3 @@ func parseStackPCs(crash string) ([]uintptr, error) { } return pcs, nil } - -func min(x, y int) int { - if x < y { - return x - } else { - return y - } -} diff --git 
a/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go index ce92892c817..0ca27316e62 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go @@ -17,10 +17,26 @@ import ( const help = `PROGNAME is a tool for static analysis of Go programs. -PROGNAME examines Go source code and reports suspicious constructs, -such as Printf calls whose arguments do not align with the format -string. It uses heuristics that do not guarantee all reports are -genuine problems, but it can find errors not caught by the compilers. +PROGNAME examines Go source code and reports diagnostics for +suspicious constructs or opportunities for improvement. +Diagnostics may include suggested fixes. + +An example of a suspicious construct is a Printf call whose arguments +do not align with the format string. Analyzers may use heuristics that +do not guarantee all reports are genuine problems, but can find +mistakes not caught by the compiler. + +An example of an opportunity for improvement is a loop over +strings.Split(doc, "\n"), which may be replaced by a loop over the +strings.SplitSeq iterator, avoiding an array allocation. +Diagnostics in such cases may report non-problems, +but should carry fixes that may be safely applied. + +For analyzers of the first kind, use "go vet -vettool=PROGRAM" +to run the tool and report diagnostics. + +For analyzers of the second kind, use "go fix -fixtool=PROGRAM" +to run the tool and apply the fixes it suggests. ` // Help implements the help subcommand for a multichecker or unitchecker @@ -29,7 +45,7 @@ genuine problems, but it can find errors not caught by the compilers. func Help(progname string, analyzers []*analysis.Analyzer, args []string) { // No args: show summary of all analyzers. 
if len(args) == 0 { - fmt.Println(strings.Replace(help, "PROGNAME", progname, -1)) + fmt.Println(strings.ReplaceAll(help, "PROGNAME", progname)) fmt.Println("Registered analyzers:") fmt.Println() sort.Slice(analyzers, func(i, j int) bool { diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go index e554c3cc903..b4e91edce3b 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go @@ -13,9 +13,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysisinternal" ) //go:embed doc.go @@ -23,7 +23,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "appends", - Doc: analysisutil.MustExtractDoc(doc, "appends"), + Doc: analysisinternal.MustExtractDoc(doc, "appends"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/appends", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go index efbf05d596a..e9c08798449 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go @@ -19,7 +19,7 @@ import ( "strings" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/internal/analysisinternal" ) const Doc = "report mismatches between assembly files and Go declarations" @@ -175,7 +175,7 @@ func run(pass *analysis.Pass) (any, error) { Files: for _, fname := range sfiles { - content, tf, err := analysisutil.ReadFile(pass, fname) + content, tf, err := analysisinternal.ReadFile(pass, fname) if err != nil { return nil, err } @@ -211,7 +211,7 @@ Files: resultStr = "result register" } for _, line := range retLine { - pass.Reportf(analysisutil.LineStart(tf, line), "[%s] %s: RET without writing to %s", arch, fnName, resultStr) + pass.Reportf(tf.LineStart(line), "[%s] %s: RET without writing to %s", arch, fnName, resultStr) } } retLine = nil @@ -227,7 +227,7 @@ Files: lineno++ badf := func(format string, args ...any) { - pass.Reportf(analysisutil.LineStart(tf, lineno), "[%s] %s: %s", arch, fnName, fmt.Sprintf(format, args...)) + pass.Reportf(tf.LineStart(lineno), "[%s] %s: %s", arch, fnName, fmt.Sprintf(format, args...)) } if arch == "" { diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go index 1914bb47616..8080aed020e 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go @@ -17,9 +17,11 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/refactor" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -27,26 +29,26 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: 
"assign", - Doc: analysisutil.MustExtractDoc(doc, "assign"), + Doc: analysisinternal.MustExtractDoc(doc, "assign"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/assign", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } func run(pass *analysis.Pass) (any, error) { - inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + var ( + inspect = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + info = pass.TypesInfo + ) - nodeFilter := []ast.Node{ - (*ast.AssignStmt)(nil), - } - inspect.Preorder(nodeFilter, func(n ast.Node) { - stmt := n.(*ast.AssignStmt) + for curAssign := range inspect.Root().Preorder((*ast.AssignStmt)(nil)) { + stmt := curAssign.Node().(*ast.AssignStmt) if stmt.Tok != token.ASSIGN { - return // ignore := + continue // ignore := } if len(stmt.Lhs) != len(stmt.Rhs) { // If LHS and RHS have different cardinality, they can't be the same. - return + continue } // Delete redundant LHS, RHS pairs, taking care @@ -61,13 +63,13 @@ func run(pass *analysis.Pass) (any, error) { isSelfAssign := false var le string - if !analysisutil.HasSideEffects(pass.TypesInfo, lhs) && - !analysisutil.HasSideEffects(pass.TypesInfo, rhs) && - !isMapIndex(pass.TypesInfo, lhs) && + if typesinternal.NoEffects(info, lhs) && + typesinternal.NoEffects(info, rhs) && + !isMapIndex(info, lhs) && reflect.TypeOf(lhs) == reflect.TypeOf(rhs) { // short-circuit the heavy-weight gofmt check - le = analysisinternal.Format(pass.Fset, lhs) - re := analysisinternal.Format(pass.Fset, rhs) + le = astutil.Format(pass.Fset, lhs) + re := astutil.Format(pass.Fset, rhs) if le == re { isSelfAssign = true } @@ -109,13 +111,14 @@ func run(pass *analysis.Pass) (any, error) { } if len(exprs) == 0 { - return + continue } if len(exprs) == len(stmt.Lhs) { // If every part of the statement is a self-assignment, // remove the whole statement. 
- edits = []analysis.TextEdit{{Pos: stmt.Pos(), End: stmt.End()}} + tokFile := pass.Fset.File(stmt.Pos()) + edits = refactor.DeleteStmt(tokFile, curAssign) } pass.Report(analysis.Diagnostic{ @@ -126,7 +129,7 @@ func run(pass *analysis.Pass) (any, error) { TextEdits: edits, }}, }) - }) + } return nil, nil } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go index 82d5439ce57..9faa3f67c1d 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go @@ -11,10 +11,11 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -22,7 +23,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "atomic", - Doc: analysisutil.MustExtractDoc(doc, "atomic"), + Doc: analysisinternal.MustExtractDoc(doc, "atomic"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/atomic", Requires: []*analysis.Analyzer{inspect.Analyzer}, RunDespiteErrors: true, @@ -30,7 +31,7 @@ var Analyzer = &analysis.Analyzer{ } func run(pass *analysis.Pass) (any, error) { - if !analysisinternal.Imports(pass.Pkg, "sync/atomic") { + if !typesinternal.Imports(pass.Pkg, "sync/atomic") { return nil, nil // doesn't directly import sync/atomic } @@ -54,7 +55,7 @@ func run(pass *analysis.Pass) (any, error) { continue } obj := typeutil.Callee(pass.TypesInfo, call) - if analysisinternal.IsFunctionNamed(obj, "sync/atomic", "AddInt32", "AddInt64", "AddUint32", "AddUint64", "AddUintptr") { + if typesinternal.IsFunctionNamed(obj, "sync/atomic", "AddInt32", "AddInt64", "AddUint32", "AddUint64", "AddUintptr") { checkAtomicAddAssignment(pass, n.Lhs[i], call) } } @@ -72,7 +73,7 @@ func checkAtomicAddAssignment(pass *analysis.Pass, left ast.Expr, call *ast.Call arg := call.Args[0] broken := false - gofmt := func(e ast.Expr) string { return analysisinternal.Format(pass.Fset, e) } + gofmt := func(e ast.Expr) string { return astutil.Format(pass.Fset, e) } if uarg, ok := arg.(*ast.UnaryExpr); ok && uarg.Op == token.AND { broken = gofmt(left) == gofmt(uarg.X) diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go index e1cf9f9b7ad..574fafaa95d 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go @@ -13,9 +13,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/typesinternal" ) const Doc = "check for common mistakes involving boolean operators" @@ -84,7 +84,7 @@ func (op boolOp) commutativeSets(info *types.Info, e *ast.BinaryExpr, seen map[* i := 0 var sets [][]ast.Expr for j := 0; j <= len(exprs); j++ { - if j == len(exprs) || analysisutil.HasSideEffects(info, exprs[j]) { + if j == len(exprs) || !typesinternal.NoEffects(info, exprs[j]) { if i < j { sets = 
append(sets, exprs[i:j]) } @@ -104,7 +104,7 @@ func (op boolOp) commutativeSets(info *types.Info, e *ast.BinaryExpr, seen map[* func (op boolOp) checkRedundant(pass *analysis.Pass, exprs []ast.Expr) { seen := make(map[string]bool) for _, e := range exprs { - efmt := analysisinternal.Format(pass.Fset, e) + efmt := astutil.Format(pass.Fset, e) if seen[efmt] { pass.ReportRangef(e, "redundant %s: %s %s %s", op.name, efmt, op.tok, efmt) } else { @@ -150,8 +150,8 @@ func (op boolOp) checkSuspect(pass *analysis.Pass, exprs []ast.Expr) { } // e is of the form 'x != c' or 'x == c'. - xfmt := analysisinternal.Format(pass.Fset, x) - efmt := analysisinternal.Format(pass.Fset, e) + xfmt := astutil.Format(pass.Fset, x) + efmt := astutil.Format(pass.Fset, e) if prev, found := seen[xfmt]; found { // checkRedundant handles the case in which efmt == prev. if efmt != prev { diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go index 6e32f298dc2..7dd4f249e25 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go @@ -14,7 +14,7 @@ import ( "unicode" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/internal/analysisinternal" ) const Doc = "check //go:build and // +build directives" @@ -86,7 +86,7 @@ func checkOtherFile(pass *analysis.Pass, filename string) error { // We cannot use the Go parser, since this may not be a Go source file. // Read the raw bytes instead. - content, tf, err := analysisutil.ReadFile(pass, filename) + content, tf, err := analysisinternal.ReadFile(pass, filename) if err != nil { return err } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go index d9189b5b696..bf1202b92b7 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go @@ -18,7 +18,7 @@ import ( "strconv" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/typesinternal" ) const debug = false @@ -41,7 +41,7 @@ var Analyzer = &analysis.Analyzer{ } func run(pass *analysis.Pass) (any, error) { - if !analysisinternal.Imports(pass.Pkg, "runtime/cgo") { + if !typesinternal.Imports(pass.Pkg, "runtime/cgo") { return nil, nil // doesn't use cgo } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go index d35b85f03a7..4190cc5900f 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go @@ -16,8 +16,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/typesinternal" "golang.org/x/tools/internal/versions" ) @@ -86,7 +87,7 @@ func checkCopyLocksAssign(pass *analysis.Pass, assign *ast.AssignStmt, goversion lhs := assign.Lhs for i, x := range assign.Rhs { if path := lockPathRhs(pass, x); path != nil { - pass.ReportRangef(x, "assignment 
copies lock value to %v: %v", analysisinternal.Format(pass.Fset, assign.Lhs[i]), path) + pass.ReportRangef(x, "assignment copies lock value to %v: %v", astutil.Format(pass.Fset, assign.Lhs[i]), path) lhs = nil // An lhs has been reported. We prefer the assignment warning and do not report twice. } } @@ -100,7 +101,7 @@ func checkCopyLocksAssign(pass *analysis.Pass, assign *ast.AssignStmt, goversion if id, ok := l.(*ast.Ident); ok && id.Name != "_" { if obj := pass.TypesInfo.Defs[id]; obj != nil && obj.Type() != nil { if path := lockPath(pass.Pkg, obj.Type(), nil); path != nil { - pass.ReportRangef(l, "for loop iteration copies lock value to %v: %v", analysisinternal.Format(pass.Fset, l), path) + pass.ReportRangef(l, "for loop iteration copies lock value to %v: %v", astutil.Format(pass.Fset, l), path) } } } @@ -132,7 +133,7 @@ func checkCopyLocksCompositeLit(pass *analysis.Pass, cl *ast.CompositeLit) { x = node.Value } if path := lockPathRhs(pass, x); path != nil { - pass.ReportRangef(x, "literal copies lock value from %v: %v", analysisinternal.Format(pass.Fset, x), path) + pass.ReportRangef(x, "literal copies lock value from %v: %v", astutil.Format(pass.Fset, x), path) } } } @@ -166,7 +167,7 @@ func checkCopyLocksCallExpr(pass *analysis.Pass, ce *ast.CallExpr) { } for _, x := range ce.Args { if path := lockPathRhs(pass, x); path != nil { - pass.ReportRangef(x, "call of %s copies lock value: %v", analysisinternal.Format(pass.Fset, ce.Fun), path) + pass.ReportRangef(x, "call of %s copies lock value: %v", astutil.Format(pass.Fset, ce.Fun), path) } } } @@ -233,7 +234,7 @@ func checkCopyLocksRangeVar(pass *analysis.Pass, rtok token.Token, e ast.Expr) { return } if path := lockPath(pass.Pkg, typ, nil); path != nil { - pass.Reportf(e.Pos(), "range var %s copies lock: %v", analysisinternal.Format(pass.Fset, e), path) + pass.Reportf(e.Pos(), "range var %s copies lock: %v", astutil.Format(pass.Fset, e), path) } } @@ -353,7 +354,7 @@ func lockPath(tpkg *types.Package, typ types.Type, seen map[types.Type]bool) typ // In go1.10, sync.noCopy did not implement Locker. // (The Unlock method was added only in CL 121876.) // TODO(adonovan): remove workaround when we drop go1.10. 
-	if analysisinternal.IsTypeNamed(typ, "sync", "noCopy") {
+	if typesinternal.IsTypeNamed(typ, "sync", "noCopy") {
 		return []string{typ.String()}
 	}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go
index e11957f2d09..3069ee9fecd 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go
@@ -10,10 +10,10 @@ import (
 
 	"golang.org/x/tools/go/analysis"
 	"golang.org/x/tools/go/analysis/passes/inspect"
-	"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
 	"golang.org/x/tools/go/ast/inspector"
 	"golang.org/x/tools/go/types/typeutil"
 	"golang.org/x/tools/internal/analysisinternal"
+	"golang.org/x/tools/internal/typesinternal"
 )
 
 //go:embed doc.go
@@ -23,20 +23,20 @@ var doc string
 var Analyzer = &analysis.Analyzer{
 	Name:     "defers",
 	Requires: []*analysis.Analyzer{inspect.Analyzer},
+	Doc:      analysisinternal.MustExtractDoc(doc, "defers"),
 	URL:      "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/defers",
-	Doc:      analysisutil.MustExtractDoc(doc, "defers"),
 	Run:      run,
 }
 
 func run(pass *analysis.Pass) (any, error) {
-	if !analysisinternal.Imports(pass.Pkg, "time") {
+	if !typesinternal.Imports(pass.Pkg, "time") {
 		return nil, nil
 	}
 
 	checkDeferCall := func(node ast.Node) bool {
 		switch v := node.(type) {
 		case *ast.CallExpr:
-			if analysisinternal.IsFunctionNamed(typeutil.Callee(pass.TypesInfo, v), "time", "Since") {
+			if typesinternal.IsFunctionNamed(typeutil.Callee(pass.TypesInfo, v), "time", "Since") {
 				pass.Reportf(v.Pos(), "call to time.Since is not deferred")
 			}
 		case *ast.FuncLit:
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go
index bebec891408..c84d25842e3 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go
@@ -14,7 +14,7 @@ import (
 	"unicode/utf8"
 
 	"golang.org/x/tools/go/analysis"
-	"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
+	"golang.org/x/tools/internal/analysisinternal"
 )
 
 const Doc = `check Go toolchain directives such as //go:debug
@@ -86,7 +86,7 @@ func checkGoFile(pass *analysis.Pass, f *ast.File) {
 func checkOtherFile(pass *analysis.Pass, filename string) error {
 	// We cannot use the Go parser, since this is not a Go source file.
 	// Read the raw bytes instead.
- content, tf, err := analysisutil.ReadFile(pass, filename) + content, tf, err := analysisinternal.ReadFile(pass, filename) if err != nil { return err } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go index b8d29d019db..b3df99929dc 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go @@ -12,22 +12,20 @@ import ( "go/types" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/typesinternal/typeindex" ) const Doc = `report passing non-pointer or non-error values to errors.As -The errorsas analysis reports calls to errors.As where the type +The errorsas analyzer reports calls to errors.As where the type of the second argument is not a pointer to a type implementing error.` var Analyzer = &analysis.Analyzer{ Name: "errorsas", Doc: Doc, URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/errorsas", - Requires: []*analysis.Analyzer{inspect.Analyzer}, + Requires: []*analysis.Analyzer{typeindexanalyzer.Analyzer}, Run: run, } @@ -39,38 +37,31 @@ func run(pass *analysis.Pass) (any, error) { return nil, nil } - if !analysisinternal.Imports(pass.Pkg, "errors") { - return nil, nil // doesn't directly import errors - } + var ( + index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + info = pass.TypesInfo + ) - inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - - nodeFilter := []ast.Node{ - (*ast.CallExpr)(nil), - } - inspect.Preorder(nodeFilter, func(n ast.Node) { - call := n.(*ast.CallExpr) - obj := typeutil.Callee(pass.TypesInfo, call) - if !analysisinternal.IsFunctionNamed(obj, "errors", "As") { - return - } + for curCall := range index.Calls(index.Object("errors", "As")) { + call := curCall.Node().(*ast.CallExpr) if len(call.Args) < 2 { - return // not enough arguments, e.g. called with return values of another function + continue // spread call: errors.As(pair()) } - if err := checkAsTarget(pass, call.Args[1]); err != nil { + + // Check for incorrect arguments. + if err := checkAsTarget(info, call.Args[1]); err != nil { pass.ReportRangef(call, "%v", err) + continue } - }) + } return nil, nil } -var errorType = types.Universe.Lookup("error").Type() - // checkAsTarget reports an error if the second argument to errors.As is invalid. -func checkAsTarget(pass *analysis.Pass, e ast.Expr) error { - t := pass.TypesInfo.Types[e].Type - if it, ok := t.Underlying().(*types.Interface); ok && it.NumMethods() == 0 { - // A target of interface{} is always allowed, since it often indicates +func checkAsTarget(info *types.Info, e ast.Expr) error { + t := info.Types[e].Type + if types.Identical(t.Underlying(), anyType) { + // A target of any is always allowed, since it often indicates // a value forwarded from another source. 
return nil } @@ -78,12 +69,16 @@ func checkAsTarget(pass *analysis.Pass, e ast.Expr) error { if !ok { return errors.New("second argument to errors.As must be a non-nil pointer to either a type that implements error, or to any interface type") } - if pt.Elem() == errorType { + if types.Identical(pt.Elem(), errorType) { return errors.New("second argument to errors.As should not be *error") } - _, ok = pt.Elem().Underlying().(*types.Interface) - if ok || types.Implements(pt.Elem(), errorType.Underlying().(*types.Interface)) { - return nil + if !types.IsInterface(pt.Elem()) && !types.AssignableTo(pt.Elem(), errorType) { + return errors.New("second argument to errors.As must be a non-nil pointer to either a type that implements error, or to any interface type") } - return errors.New("second argument to errors.As must be a non-nil pointer to either a type that implements error, or to any interface type") + return nil } + +var ( + anyType = types.Universe.Lookup("any").Type() + errorType = types.Universe.Lookup("error").Type() +) diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go index ff9c8b4f818..809095d40a5 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go @@ -13,7 +13,7 @@ import ( "unicode" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/internal/analysisinternal" ) const Doc = "report assembly that clobbers the frame pointer before saving it" @@ -98,7 +98,7 @@ func run(pass *analysis.Pass) (any, error) { } for _, fname := range sfiles { - content, tf, err := analysisutil.ReadFile(pass, fname) + content, tf, err := analysisinternal.ReadFile(pass, fname) if err != nil { return nil, err } @@ -127,7 +127,7 @@ func run(pass *analysis.Pass) (any, error) { } if arch.isFPWrite(line) { - pass.Reportf(analysisutil.LineStart(tf, lineno), "frame pointer is clobbered before saving") + pass.Reportf(tf.LineStart(lineno), "frame pointer is clobbered before saving") active = false continue } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go index e9acd96547e..37ecb6523bd 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go @@ -13,7 +13,6 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" "golang.org/x/tools/internal/typesinternal" ) @@ -46,7 +45,7 @@ func run(pass *analysis.Pass) (any, error) { // Fast path: if the package doesn't import net/http, // skip the traversal. - if !analysisinternal.Imports(pass.Pkg, "net/http") { + if !typesinternal.Imports(pass.Pkg, "net/http") { return nil, nil } @@ -118,7 +117,7 @@ func isHTTPFuncOrMethodOnClient(info *types.Info, expr *ast.CallExpr) bool { return false // the function called does not return two values. 
} isPtr, named := typesinternal.ReceiverNamed(res.At(0)) - if !isPtr || named == nil || !analysisinternal.IsTypeNamed(named, "net/http", "Response") { + if !isPtr || named == nil || !typesinternal.IsTypeNamed(named, "net/http", "Response") { return false // the first return type is not *http.Response. } @@ -133,11 +132,11 @@ func isHTTPFuncOrMethodOnClient(info *types.Info, expr *ast.CallExpr) bool { return ok && id.Name == "http" // function in net/http package. } - if analysisinternal.IsTypeNamed(typ, "net/http", "Client") { + if typesinternal.IsTypeNamed(typ, "net/http", "Client") { return true // method on http.Client. } ptr, ok := types.Unalias(typ).(*types.Pointer) - return ok && analysisinternal.IsTypeNamed(ptr.Elem(), "net/http", "Client") // method on *http.Client. + return ok && typesinternal.IsTypeNamed(ptr.Elem(), "net/http", "Client") // method on *http.Client. } // restOfBlock, given a traversal stack, finds the innermost containing diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go index 4022dbe7c22..a6dcf1cf8e8 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go @@ -11,8 +11,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysisinternal" "golang.org/x/tools/internal/typeparams" ) @@ -21,7 +21,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "ifaceassert", - Doc: analysisutil.MustExtractDoc(doc, "ifaceassert"), + Doc: analysisinternal.MustExtractDoc(doc, "ifaceassert"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/ifaceassert", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/inline/doc.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/inline/doc.go new file mode 100644 index 00000000000..a3e98cb6572 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/inline/doc.go @@ -0,0 +1,109 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package inline defines an analyzer that inlines calls to functions +and uses of constants marked with a "//go:fix inline" directive. + +# Analyzer inline + +inline: apply fixes based on 'go:fix inline' comment directives + +The inline analyzer inlines functions and constants that are marked for inlining. + +## Functions + +Given a function that is marked for inlining, like this one: + + //go:fix inline + func Square(x int) int { return Pow(x, 2) } + +this analyzer will recommend that calls to the function elsewhere, in the same +or other packages, should be inlined. + +Inlining can be used to move off of a deprecated function: + + // Deprecated: prefer Pow(x, 2). 
+	//go:fix inline
+	func Square(x int) int { return Pow(x, 2) }
+
+It can also be used to move off of an obsolete package,
+as when the import path has changed or a higher major version is available:
+
+	package pkg
+
+	import pkg2 "pkg/v2"
+
+	//go:fix inline
+	func F() { pkg2.F(nil) }
+
+Replacing a call pkg.F() by pkg2.F(nil) can have no effect on the program,
+so this mechanism provides a low-risk way to update large numbers of calls.
+We recommend, where possible, expressing the old API in terms of the new one
+to enable automatic migration.
+
+The inliner takes care to avoid behavior changes, even subtle ones,
+such as changes to the order in which argument expressions are
+evaluated. When it cannot safely eliminate all parameter variables,
+it may introduce a "binding declaration" of the form
+
+	var params = args
+
+to evaluate argument expressions in the correct order and bind them to
+parameter variables. Since the resulting code transformation may be
+stylistically suboptimal, such inlinings are disabled by default; they
+may be enabled by specifying the -inline.allow_binding_decl=true flag
+to the analyzer driver.
+
+(In cases where it is not safe to "reduce" a call—that is, to replace
+a call f(x) by the body of function f, suitably substituted—the
+inliner machinery is capable of replacing f by a function literal,
+func(){...}(). However, the inline analyzer discards all such
+"literalizations" unconditionally, again on grounds of style.)
+
+## Constants
+
+Given a constant that is marked for inlining, like this one:
+
+	//go:fix inline
+	const Ptr = Pointer
+
+this analyzer will recommend that uses of Ptr should be replaced with Pointer.
+
+As with functions, inlining can be used to replace deprecated constants and
+constants in obsolete packages.
+
+A constant definition can be marked for inlining only if it refers to another
+named constant.
+
+The "//go:fix inline" comment must appear before a single const declaration on its own,
+as above; before a const declaration that is part of a group, as in this case:
+
+	const (
+	   C = 1
+	   //go:fix inline
+	   Ptr = Pointer
+	)
+
+or before a group, applying to every constant in the group:
+
+	//go:fix inline
+	const (
+	   Ptr = Pointer
+	   Val = Value
+	)
+
+The proposal https://go.dev/issue/32816 introduces the "//go:fix inline" directive.
+
+You can use this command to apply inline fixes en masse:
+
+	$ go run golang.org/x/tools/go/analysis/passes/inline/cmd/inline@latest -fix ./...
+
+# Analyzer gofixdirective
+
+gofixdirective: validate uses of //go:fix comment directives
+
+The gofixdirective analyzer checks "//go:fix inline" directives for correctness.
+See the documentation for the inline analyzer for more about "//go:fix inline".
+*/
+package inline
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/inline/gofix.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/inline/gofix.go
new file mode 100644
index 00000000000..629d5d8526f
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/inline/gofix.go
@@ -0,0 +1,537 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
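To make the binding-declaration behavior described above concrete, here is a hedged sketch with invented names (Sum3 and f are not from this CL). Given:

	//go:fix inline
	func Sum3(x int) int { return x + x + x }

naively substituting the call below would evaluate f three times:

	y := Sum3(f()) // naive substitution: y := f() + f() + f()

so the inliner instead binds the argument once:

	var x int = f()
	y := x + x + x

Because this output is stylistically suboptimal, such fixes are suppressed unless the allow_binding_decl flag is set, as the code below implements.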
+ +package inline + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "slices" + "strings" + + _ "embed" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/gofixdirective" + "golang.org/x/tools/go/ast/edge" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/diff" + "golang.org/x/tools/internal/packagepath" + "golang.org/x/tools/internal/refactor" + "golang.org/x/tools/internal/refactor/inline" + "golang.org/x/tools/internal/typesinternal" +) + +//go:embed doc.go +var doc string + +var Analyzer = &analysis.Analyzer{ + Name: "inline", + Doc: analysisinternal.MustExtractDoc(doc, "inline"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/inline", + Run: run, + FactTypes: []analysis.Fact{ + (*goFixInlineFuncFact)(nil), + (*goFixInlineConstFact)(nil), + (*goFixInlineAliasFact)(nil), + }, + Requires: []*analysis.Analyzer{inspect.Analyzer}, +} + +var allowBindingDecl bool + +func init() { + Analyzer.Flags.BoolVar(&allowBindingDecl, "allow_binding_decl", false, + "permit inlinings that require a 'var params = args' declaration") +} + +// analyzer holds the state for this analysis. +type analyzer struct { + pass *analysis.Pass + root inspector.Cursor + // memoization of repeated calls for same file. + fileContent map[string][]byte + // memoization of fact imports (nil => no fact) + inlinableFuncs map[*types.Func]*inline.Callee + inlinableConsts map[*types.Const]*goFixInlineConstFact + inlinableAliases map[*types.TypeName]*goFixInlineAliasFact +} + +func run(pass *analysis.Pass) (any, error) { + a := &analyzer{ + pass: pass, + root: pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Root(), + fileContent: make(map[string][]byte), + inlinableFuncs: make(map[*types.Func]*inline.Callee), + inlinableConsts: make(map[*types.Const]*goFixInlineConstFact), + inlinableAliases: make(map[*types.TypeName]*goFixInlineAliasFact), + } + gofixdirective.Find(pass, a.root, a) + a.inline() + return nil, nil +} + +// HandleFunc exports a fact for functions marked with go:fix. +func (a *analyzer) HandleFunc(decl *ast.FuncDecl) { + content, err := a.readFile(decl) + if err != nil { + a.pass.Reportf(decl.Doc.Pos(), "invalid inlining candidate: cannot read source file: %v", err) + return + } + callee, err := inline.AnalyzeCallee(discard, a.pass.Fset, a.pass.Pkg, a.pass.TypesInfo, decl, content) + if err != nil { + a.pass.Reportf(decl.Doc.Pos(), "invalid inlining candidate: %v", err) + return + } + fn := a.pass.TypesInfo.Defs[decl.Name].(*types.Func) + a.pass.ExportObjectFact(fn, &goFixInlineFuncFact{callee}) + a.inlinableFuncs[fn] = callee +} + +// HandleAlias exports a fact for aliases marked with go:fix. +func (a *analyzer) HandleAlias(spec *ast.TypeSpec) { + // Remember that this is an inlinable alias. + typ := &goFixInlineAliasFact{} + lhs := a.pass.TypesInfo.Defs[spec.Name].(*types.TypeName) + a.inlinableAliases[lhs] = typ + // Create a fact only if the LHS is exported and defined at top level. + // We create a fact even if the RHS is non-exported, + // so we can warn about uses in other packages. + if lhs.Exported() && typesinternal.IsPackageLevel(lhs) { + a.pass.ExportObjectFact(lhs, typ) + } +} + +// HandleConst exports a fact for constants marked with go:fix. 
+func (a *analyzer) HandleConst(nameIdent, rhsIdent *ast.Ident) { + lhs := a.pass.TypesInfo.Defs[nameIdent].(*types.Const) + rhs := a.pass.TypesInfo.Uses[rhsIdent].(*types.Const) // must be so in a well-typed program + con := &goFixInlineConstFact{ + RHSName: rhs.Name(), + RHSPkgName: rhs.Pkg().Name(), + RHSPkgPath: rhs.Pkg().Path(), + } + if rhs.Pkg() == a.pass.Pkg { + con.rhsObj = rhs + } + a.inlinableConsts[lhs] = con + // Create a fact only if the LHS is exported and defined at top level. + // We create a fact even if the RHS is non-exported, + // so we can warn about uses in other packages. + if lhs.Exported() && typesinternal.IsPackageLevel(lhs) { + a.pass.ExportObjectFact(lhs, con) + } +} + +// inline inlines each static call to an inlinable function +// and each reference to an inlinable constant or type alias. +// +// TODO(adonovan): handle multiple diffs that each add the same import. +func (a *analyzer) inline() { + for cur := range a.root.Preorder((*ast.CallExpr)(nil), (*ast.Ident)(nil)) { + switch n := cur.Node().(type) { + case *ast.CallExpr: + a.inlineCall(n, cur) + + case *ast.Ident: + switch t := a.pass.TypesInfo.Uses[n].(type) { + case *types.TypeName: + a.inlineAlias(t, cur) + case *types.Const: + a.inlineConst(t, cur) + } + } + } +} + +// If call is a call to an inlinable func, suggest inlining its use at cur. +func (a *analyzer) inlineCall(call *ast.CallExpr, cur inspector.Cursor) { + if fn := typeutil.StaticCallee(a.pass.TypesInfo, call); fn != nil { + // Inlinable? + callee, ok := a.inlinableFuncs[fn] + if !ok { + var fact goFixInlineFuncFact + if a.pass.ImportObjectFact(fn, &fact) { + callee = fact.Callee + a.inlinableFuncs[fn] = callee + } + } + if callee == nil { + return // nope + } + + // Inline the call. + content, err := a.readFile(call) + if err != nil { + a.pass.Reportf(call.Lparen, "invalid inlining candidate: cannot read source file: %v", err) + return + } + curFile := astutil.EnclosingFile(cur) + caller := &inline.Caller{ + Fset: a.pass.Fset, + Types: a.pass.Pkg, + Info: a.pass.TypesInfo, + File: curFile, + Call: call, + Content: content, + } + res, err := inline.Inline(caller, callee, &inline.Options{Logf: discard}) + if err != nil { + a.pass.Reportf(call.Lparen, "%v", err) + return + } + + if res.Literalized { + // Users are not fond of inlinings that literalize + // f(x) to func() { ... }(), so avoid them. + // + // (Unfortunately the inliner is very timid, + // and often literalizes when it cannot prove that + // reducing the call is safe; the user of this tool + // has no indication of what the problem is.) + return + } + if res.BindingDecl && !allowBindingDecl { + // When applying fix en masse, users are similarly + // unenthusiastic about inlinings that cannot + // entirely eliminate the parameters and + // insert a 'var params = args' declaration. + // The flag allows them to decline such fixes. + return + } + got := res.Content + + // Suggest the "fix". 
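+		// Note: diff.Bytes returns edits as byte offsets into content,
+		// so each offset is converted to a token.Pos below by adding
+		// curFile.FileStart.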
+ var textEdits []analysis.TextEdit + for _, edit := range diff.Bytes(content, got) { + textEdits = append(textEdits, analysis.TextEdit{ + Pos: curFile.FileStart + token.Pos(edit.Start), + End: curFile.FileStart + token.Pos(edit.End), + NewText: []byte(edit.New), + }) + } + a.pass.Report(analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fmt.Sprintf("Call of %v should be inlined", callee), + SuggestedFixes: []analysis.SuggestedFix{{ + Message: fmt.Sprintf("Inline call of %v", callee), + TextEdits: textEdits, + }}, + }) + } +} + +// If tn is the TypeName of an inlinable alias, suggest inlining its use at cur. +func (a *analyzer) inlineAlias(tn *types.TypeName, curId inspector.Cursor) { + inalias, ok := a.inlinableAliases[tn] + if !ok { + var fact goFixInlineAliasFact + if a.pass.ImportObjectFact(tn, &fact) { + inalias = &fact + a.inlinableAliases[tn] = inalias + } + } + if inalias == nil { + return // nope + } + + alias := tn.Type().(*types.Alias) + // Remember the names of the alias's type params. When we check for shadowing + // later, we'll ignore these because they won't appear in the replacement text. + typeParamNames := map[*types.TypeName]bool{} + for tp := range alias.TypeParams().TypeParams() { + typeParamNames[tp.Obj()] = true + } + rhs := alias.Rhs() + curPath := a.pass.Pkg.Path() + curFile := astutil.EnclosingFile(curId) + id := curId.Node().(*ast.Ident) + // We have an identifier A here (n), possibly qualified by a package + // identifier (sel.n), and an inlinable "type A = rhs" elsewhere. + // + // We can replace A with rhs if no name in rhs is shadowed at n's position, + // and every package in rhs is importable by the current package. + + var ( + importPrefixes = map[string]string{curPath: ""} // from pkg path to prefix + edits []analysis.TextEdit + ) + for _, tn := range typenames(rhs) { + // Ignore the type parameters of the alias: they won't appear in the result. + if typeParamNames[tn] { + continue + } + var pkgPath, pkgName string + if pkg := tn.Pkg(); pkg != nil { + pkgPath = pkg.Path() + pkgName = pkg.Name() + } + if pkgPath == "" || pkgPath == curPath { + // The name is in the current package or the universe scope, so no import + // is required. Check that it is not shadowed (that is, that the type + // it refers to in rhs is the same one it refers to at n). + scope := a.pass.TypesInfo.Scopes[curFile].Innermost(id.Pos()) // n's scope + _, obj := scope.LookupParent(tn.Name(), id.Pos()) // what qn.name means in n's scope + if obj != tn { + return + } + } else if !packagepath.CanImport(a.pass.Pkg.Path(), pkgPath) { + // If this package can't see the package of this part of rhs, we can't inline. + return + } else if _, ok := importPrefixes[pkgPath]; !ok { + // Use AddImport to add pkgPath if it's not there already. Associate the prefix it assigns + // with the package path for use by the TypeString qualifier below. + prefix, eds := refactor.AddImport( + a.pass.TypesInfo, curFile, pkgName, pkgPath, tn.Name(), id.Pos()) + importPrefixes[pkgPath] = strings.TrimSuffix(prefix, ".") + edits = append(edits, eds...) + } + } + // Find the complete identifier, which may take any of these forms: + // Id + // Id[T] + // Id[K, V] + // pkg.Id + // pkg.Id[T] + // pkg.Id[K, V] + var expr ast.Expr = id + if astutil.IsChildOf(curId, edge.SelectorExpr_Sel) { + curId = curId.Parent() + expr = curId.Node().(ast.Expr) + } + // If expr is part of an IndexExpr or IndexListExpr, we'll need that node. + // Given C[int], TypeOf(C) is generic but TypeOf(C[int]) is instantiated. 
+ switch ek, _ := curId.ParentEdge(); ek { + case edge.IndexExpr_X: + expr = curId.Parent().Node().(*ast.IndexExpr) + case edge.IndexListExpr_X: + expr = curId.Parent().Node().(*ast.IndexListExpr) + } + t := a.pass.TypesInfo.TypeOf(expr).(*types.Alias) // type of entire identifier + if targs := t.TypeArgs(); targs.Len() > 0 { + // Instantiate the alias with the type args from this use. + // For example, given type A = M[K, V], compute the type of the use + // A[int, Foo] as M[int, Foo]. + // Don't validate instantiation: it can't panic unless we have a bug, + // in which case seeing the stack trace via telemetry would be helpful. + instAlias, _ := types.Instantiate(nil, alias, slices.Collect(targs.Types()), false) + rhs = instAlias.(*types.Alias).Rhs() + } + // To get the replacement text, render the alias RHS using the package prefixes + // we assigned above. + newText := types.TypeString(rhs, func(p *types.Package) string { + if p == a.pass.Pkg { + return "" + } + if prefix, ok := importPrefixes[p.Path()]; ok { + return prefix + } + panic(fmt.Sprintf("in %q, package path %q has no import prefix", rhs, p.Path())) + }) + a.reportInline("type alias", "Type alias", expr, edits, newText) +} + +// typenames returns the TypeNames for types within t (including t itself) that have +// them: basic types, named types and alias types. +// The same name may appear more than once. +func typenames(t types.Type) []*types.TypeName { + var tns []*types.TypeName + + var visit func(types.Type) + visit = func(t types.Type) { + if hasName, ok := t.(interface{ Obj() *types.TypeName }); ok { + tns = append(tns, hasName.Obj()) + } + switch t := t.(type) { + case *types.Basic: + tns = append(tns, types.Universe.Lookup(t.Name()).(*types.TypeName)) + case *types.Named: + for t := range t.TypeArgs().Types() { + visit(t) + } + case *types.Alias: + for t := range t.TypeArgs().Types() { + visit(t) + } + case *types.TypeParam: + tns = append(tns, t.Obj()) + case *types.Pointer: + visit(t.Elem()) + case *types.Slice: + visit(t.Elem()) + case *types.Array: + visit(t.Elem()) + case *types.Chan: + visit(t.Elem()) + case *types.Map: + visit(t.Key()) + visit(t.Elem()) + case *types.Struct: + for i := range t.NumFields() { + visit(t.Field(i).Type()) + } + case *types.Signature: + // Ignore the receiver: although it may be present, it has no meaning + // in a type expression. + // Ditto for receiver type params. + // Also, function type params cannot appear in a type expression. + if t.TypeParams() != nil { + panic("Signature.TypeParams in type expression") + } + visit(t.Params()) + visit(t.Results()) + case *types.Interface: + for i := range t.NumEmbeddeds() { + visit(t.EmbeddedType(i)) + } + for i := range t.NumExplicitMethods() { + visit(t.ExplicitMethod(i).Type()) + } + case *types.Tuple: + for v := range t.Variables() { + visit(v.Type()) + } + case *types.Union: + panic("Union in type expression") + default: + panic(fmt.Sprintf("unknown type %T", t)) + } + } + + visit(t) + + return tns +} + +// If con is an inlinable constant, suggest inlining its use at cur. +func (a *analyzer) inlineConst(con *types.Const, cur inspector.Cursor) { + incon, ok := a.inlinableConsts[con] + if !ok { + var fact goFixInlineConstFact + if a.pass.ImportObjectFact(con, &fact) { + incon = &fact + a.inlinableConsts[con] = incon + } + } + if incon == nil { + return // nope + } + + // If n is qualified by a package identifier, we'll need the full selector expression. 
+ curFile := astutil.EnclosingFile(cur) + n := cur.Node().(*ast.Ident) + + // We have an identifier A here (n), possibly qualified by a package identifier (sel.X, + // where sel is the parent of n), // and an inlinable "const A = B" elsewhere (incon). + // Consider replacing A with B. + + // Check that the expression we are inlining (B) means the same thing + // (refers to the same object) in n's scope as it does in A's scope. + // If the RHS is not in the current package, AddImport will handle + // shadowing, so we only need to worry about when both expressions + // are in the current package. + if a.pass.Pkg.Path() == incon.RHSPkgPath { + // incon.rhsObj is the object referred to by B in the definition of A. + scope := a.pass.TypesInfo.Scopes[curFile].Innermost(n.Pos()) // n's scope + _, obj := scope.LookupParent(incon.RHSName, n.Pos()) // what "B" means in n's scope + if obj == nil { + // Should be impossible: if code at n can refer to the LHS, + // it can refer to the RHS. + panic(fmt.Sprintf("no object for inlinable const %s RHS %s", n.Name, incon.RHSName)) + } + if obj != incon.rhsObj { + // "B" means something different here than at the inlinable const's scope. + return + } + } else if !packagepath.CanImport(a.pass.Pkg.Path(), incon.RHSPkgPath) { + // If this package can't see the RHS's package, we can't inline. + return + } + var ( + importPrefix string + edits []analysis.TextEdit + ) + if incon.RHSPkgPath != a.pass.Pkg.Path() { + importPrefix, edits = refactor.AddImport( + a.pass.TypesInfo, curFile, incon.RHSPkgName, incon.RHSPkgPath, incon.RHSName, n.Pos()) + } + // If n is qualified by a package identifier, we'll need the full selector expression. + var expr ast.Expr = n + if astutil.IsChildOf(cur, edge.SelectorExpr_Sel) { + expr = cur.Parent().Node().(ast.Expr) + } + a.reportInline("constant", "Constant", expr, edits, importPrefix+incon.RHSName) +} + +// reportInline reports a diagnostic for fixing an inlinable name. +func (a *analyzer) reportInline(kind, capKind string, ident ast.Expr, edits []analysis.TextEdit, newText string) { + edits = append(edits, analysis.TextEdit{ + Pos: ident.Pos(), + End: ident.End(), + NewText: []byte(newText), + }) + name := astutil.Format(a.pass.Fset, ident) + a.pass.Report(analysis.Diagnostic{ + Pos: ident.Pos(), + End: ident.End(), + Message: fmt.Sprintf("%s %s should be inlined", capKind, name), + SuggestedFixes: []analysis.SuggestedFix{{ + Message: fmt.Sprintf("Inline %s %s", kind, name), + TextEdits: edits, + }}, + }) +} + +func (a *analyzer) readFile(node ast.Node) ([]byte, error) { + filename := a.pass.Fset.File(node.Pos()).Name() + content, ok := a.fileContent[filename] + if !ok { + var err error + content, err = a.pass.ReadFile(filename) + if err != nil { + return nil, err + } + a.fileContent[filename] = content + } + return content, nil +} + +// A goFixInlineFuncFact is exported for each function marked "//go:fix inline". +// It holds information about the callee to support inlining. +type goFixInlineFuncFact struct{ Callee *inline.Callee } + +func (f *goFixInlineFuncFact) String() string { return "goFixInline " + f.Callee.String() } +func (*goFixInlineFuncFact) AFact() {} + +// A goFixInlineConstFact is exported for each constant marked "//go:fix inline". +// It holds information about an inlinable constant. Gob-serializable. +type goFixInlineConstFact struct { + // Information about "const LHSName = RHSName". 
+ RHSName string + RHSPkgPath string + RHSPkgName string + rhsObj types.Object // for current package +} + +func (c *goFixInlineConstFact) String() string { + return fmt.Sprintf("goFixInline const %q.%s", c.RHSPkgPath, c.RHSName) +} + +func (*goFixInlineConstFact) AFact() {} + +// A goFixInlineAliasFact is exported for each type alias marked "//go:fix inline". +// It holds no information; its mere existence demonstrates that an alias is inlinable. +type goFixInlineAliasFact struct{} + +func (c *goFixInlineAliasFact) String() string { return "goFixInline alias" } +func (*goFixInlineAliasFact) AFact() {} + +func discard(string, ...any) {} diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go deleted file mode 100644 index d3df898d301..00000000000 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package analysisutil defines various helper functions -// used by two or more packages beneath go/analysis. -package analysisutil - -import ( - "go/ast" - "go/token" - "go/types" - "os" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/internal/analysisinternal" -) - -// HasSideEffects reports whether evaluation of e has side effects. -func HasSideEffects(info *types.Info, e ast.Expr) bool { - safe := true - ast.Inspect(e, func(node ast.Node) bool { - switch n := node.(type) { - case *ast.CallExpr: - typVal := info.Types[n.Fun] - switch { - case typVal.IsType(): - // Type conversion, which is safe. - case typVal.IsBuiltin(): - // Builtin func, conservatively assumed to not - // be safe for now. - safe = false - return false - default: - // A non-builtin func or method call. - // Conservatively assume that all of them have - // side effects for now. - safe = false - return false - } - case *ast.UnaryExpr: - if n.Op == token.ARROW { - safe = false - return false - } - } - return true - }) - return !safe -} - -// ReadFile reads a file and adds it to the FileSet -// so that we can report errors against it using lineStart. -func ReadFile(pass *analysis.Pass, filename string) ([]byte, *token.File, error) { - readFile := pass.ReadFile - if readFile == nil { - readFile = os.ReadFile - } - content, err := readFile(filename) - if err != nil { - return nil, nil, err - } - tf := pass.Fset.AddFile(filename, -1, len(content)) - tf.SetLinesForContent(content) - return content, tf, nil -} - -// LineStart returns the position of the start of the specified line -// within file f, or NoPos if there is no line of that number. -func LineStart(f *token.File, line int) token.Pos { - // Use binary search to find the start offset of this line. - // - // TODO(adonovan): eventually replace this function with the - // simpler and more efficient (*go/token.File).LineStart, added - // in go1.12. 
-
-	min := 0        // inclusive
-	max := f.Size() // exclusive
-	for {
-		offset := (min + max) / 2
-		pos := f.Pos(offset)
-		posn := f.Position(pos)
-		if posn.Line == line {
-			return pos - (token.Pos(posn.Column) - 1)
-		}
-
-		if min+1 >= max {
-			return token.NoPos
-		}
-
-		if posn.Line < line {
-			min = offset
-		} else {
-			max = offset
-		}
-	}
-}
-
-var MustExtractDoc = analysisinternal.MustExtractDoc
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/internal/gofixdirective/gofixdirective.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/internal/gofixdirective/gofixdirective.go
new file mode 100644
index 00000000000..949df4bfeac
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/internal/gofixdirective/gofixdirective.go
@@ -0,0 +1,143 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gofixdirective searches for and validates go:fix directives. The
+// go/analysis/passes/inline package uses gofixdirective to find inlining
+// candidates. The go/analysis/passes/gofix package uses gofixdirective to
+// check for problems with go:fix directives.
+//
+// gofixdirective is separate from gofix to avoid depending on refactor/inline,
+// which is large.
+package gofixdirective
+
+// This package is tested by go/analysis/passes/inline.
+
+import (
+	"go/ast"
+	"go/token"
+	"go/types"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/ast/inspector"
+	internalastutil "golang.org/x/tools/internal/astutil"
+)
+
+// A Handler handles language entities with go:fix directives.
+type Handler interface {
+	HandleFunc(*ast.FuncDecl)
+	HandleAlias(*ast.TypeSpec)
+	HandleConst(name, rhs *ast.Ident)
+}
+
+// Find finds functions, type aliases, and constants annotated with an
+// appropriate "//go:fix" comment (the syntax proposed by #32816), and calls
+// handler methods for each one.
+// h may be nil.
+func Find(pass *analysis.Pass, root inspector.Cursor, h Handler) {
+	for cur := range root.Preorder((*ast.FuncDecl)(nil), (*ast.GenDecl)(nil)) {
+		switch decl := cur.Node().(type) {
+		case *ast.FuncDecl:
+			findFunc(decl, h)
+
+		case *ast.GenDecl:
+			if decl.Tok != token.CONST && decl.Tok != token.TYPE {
+				continue
+			}
+			declInline := hasFixInline(decl.Doc)
+			// Accept inline directives on the entire decl as well as individual specs.
+			for _, spec := range decl.Specs {
+				switch spec := spec.(type) {
+				case *ast.TypeSpec: // Tok == TYPE
+					findAlias(pass, spec, declInline, h)
+
+				case *ast.ValueSpec: // Tok == CONST
+					findConst(pass, spec, declInline, h)
+				}
+			}
+		}
+	}
+}
+
+func findFunc(decl *ast.FuncDecl, h Handler) {
+	if !hasFixInline(decl.Doc) {
+		return
+	}
+	if h != nil {
+		h.HandleFunc(decl)
+	}
+}
+
+func findAlias(pass *analysis.Pass, spec *ast.TypeSpec, declInline bool, h Handler) {
+	if !declInline && !hasFixInline(spec.Doc) {
+		return
+	}
+	if !spec.Assign.IsValid() {
+		pass.Reportf(spec.Pos(), "invalid //go:fix inline directive: not a type alias")
+		return
+	}
+
+	// Disallow inlines of type expressions containing array types.
+	// Given an array type like [N]int where N is a named constant, go/types provides
+	// only the value of the constant as an int64. So inlining A in this code:
+	//
+	//	const N = 5
+	//	type A = [N]int
+	//
+	// would result in [5]int, breaking the connection with N.
+	for n := range ast.Preorder(spec.Type) {
+		if ar, ok := n.(*ast.ArrayType); ok && ar.Len != nil {
+			// Make an exception when the array length is a literal int.
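+			// For example, "type A = [5]int" may be inlined as-is:
+			// the literal 5 names no constant, so no connection is lost.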
+			if lit, ok := ast.Unparen(ar.Len).(*ast.BasicLit); ok && lit.Kind == token.INT {
+				continue
+			}
+			pass.Reportf(spec.Pos(), "invalid //go:fix inline directive: array types not supported")
+			return
+		}
+	}
+	if h != nil {
+		h.HandleAlias(spec)
+	}
+}
+
+func findConst(pass *analysis.Pass, spec *ast.ValueSpec, declInline bool, h Handler) {
+	specInline := hasFixInline(spec.Doc)
+	if declInline || specInline {
+		for i, nameIdent := range spec.Names {
+			if i >= len(spec.Values) {
+				// Possible following an iota.
+				break
+			}
+			var rhsIdent *ast.Ident
+			switch val := spec.Values[i].(type) {
+			case *ast.Ident:
+				// Constants defined with the predeclared iota cannot be inlined.
+				if pass.TypesInfo.Uses[val] == builtinIota {
+					pass.Reportf(val.Pos(), "invalid //go:fix inline directive: const value is iota")
+					return
+				}
+				rhsIdent = val
+			case *ast.SelectorExpr:
+				rhsIdent = val.Sel
+			default:
+				pass.Reportf(val.Pos(), "invalid //go:fix inline directive: const value is not the name of another constant")
+				return
+			}
+			if h != nil {
+				h.HandleConst(nameIdent, rhsIdent)
+			}
+		}
+	}
+}
+
+// hasFixInline reports the presence of a "//go:fix inline" directive
+// in the comments.
+func hasFixInline(cg *ast.CommentGroup) bool {
+	for _, d := range internalastutil.Directives(cg) {
+		if d.Tool == "go" && d.Name == "fix" && d.Args == "inline" {
+			return true
+		}
+	}
+	return false
+}
+
+var builtinIota = types.Universe.Lookup("iota")
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go
index 2580a0ac21f..868226328fc 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go
@@ -11,7 +11,6 @@ import (
 
 	"golang.org/x/tools/go/analysis"
 	"golang.org/x/tools/go/analysis/passes/inspect"
-	"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
 	"golang.org/x/tools/go/ast/inspector"
 	"golang.org/x/tools/go/types/typeutil"
 	"golang.org/x/tools/internal/analysisinternal"
@@ -24,7 +23,7 @@ var doc string
 
 var Analyzer = &analysis.Analyzer{
 	Name:     "loopclosure",
-	Doc:      analysisutil.MustExtractDoc(doc, "loopclosure"),
+	Doc:      analysisinternal.MustExtractDoc(doc, "loopclosure"),
 	URL:      "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/loopclosure",
 	Requires: []*analysis.Analyzer{inspect.Analyzer},
 	Run:      run,
@@ -369,5 +368,5 @@ func isMethodCall(info *types.Info, expr ast.Expr, pkgPath, typeName, method str
 	// Check that the receiver is a <pkgPath>.<typeName> or
 	// *<pkgPath>.<typeName>.
_, named := typesinternal.ReceiverNamed(recv) - return analysisinternal.IsTypeNamed(named, pkgPath, typeName) + return typesinternal.IsTypeNamed(named, pkgPath, typeName) } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go index c0746789e9c..dfaecf51e25 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go @@ -13,11 +13,11 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/ctrlflow" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/cfg" "golang.org/x/tools/internal/analysisinternal" "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -25,7 +25,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "lostcancel", - Doc: analysisutil.MustExtractDoc(doc, "lostcancel"), + Doc: analysisinternal.MustExtractDoc(doc, "lostcancel"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/lostcancel", Run: run, Requires: []*analysis.Analyzer{ @@ -50,7 +50,7 @@ var contextPackage = "context" // checkLostCancel analyzes a single named or literal function. func run(pass *analysis.Pass) (any, error) { // Fast path: bypass check if file doesn't use context.WithCancel. - if !analysisinternal.Imports(pass.Pkg, contextPackage) { + if !typesinternal.Imports(pass.Pkg, contextPackage) { return nil, nil } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/any.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/any.go new file mode 100644 index 00000000000..05999f8f2b7 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/any.go @@ -0,0 +1,61 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modernize + +import ( + "go/ast" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysisinternal/generated" +) + +var AnyAnalyzer = &analysis.Analyzer{ + Name: "any", + Doc: analysisinternal.MustExtractDoc(doc, "any"), + Requires: []*analysis.Analyzer{ + generated.Analyzer, + inspect.Analyzer, + }, + Run: runAny, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#any", +} + +// The any pass replaces interface{} with go1.18's 'any'. +func runAny(pass *analysis.Pass) (any, error) { + skipGenerated(pass) + + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + for curFile := range filesUsing(inspect, pass.TypesInfo, "go1.18") { + for curIface := range curFile.Preorder((*ast.InterfaceType)(nil)) { + iface := curIface.Node().(*ast.InterfaceType) + + if iface.Methods.NumFields() == 0 { + // Check that 'any' is not shadowed. 
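+				// For example, in a scope containing "type any = int",
+				// rewriting interface{} to any would change its meaning,
+				// so no fix is offered there.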
+ if lookup(pass.TypesInfo, curIface, "any") == builtinAny { + pass.Report(analysis.Diagnostic{ + Pos: iface.Pos(), + End: iface.End(), + Message: "interface{} can be replaced by any", + SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Replace interface{} by any", + TextEdits: []analysis.TextEdit{ + { + Pos: iface.Pos(), + End: iface.End(), + NewText: []byte("any"), + }, + }, + }}, + }) + } + } + } + } + return nil, nil +} diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/bloop.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/bloop.go new file mode 100644 index 00000000000..eb1ac170c69 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/bloop.go @@ -0,0 +1,250 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modernize + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysisinternal/generated" + typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/moreiters" + "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/typesinternal/typeindex" +) + +var BLoopAnalyzer = &analysis.Analyzer{ + Name: "bloop", + Doc: analysisinternal.MustExtractDoc(doc, "bloop"), + Requires: []*analysis.Analyzer{ + generated.Analyzer, + inspect.Analyzer, + typeindexanalyzer.Analyzer, + }, + Run: bloop, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#bloop", +} + +// bloop updates benchmarks that use "for range b.N", replacing it +// with go1.24's b.Loop() and eliminating any preceding +// b.{Start,Stop,Reset}Timer calls. +// +// Variants: +// +// for i := 0; i < b.N; i++ {} => for b.Loop() {} +// for range b.N {} +func bloop(pass *analysis.Pass) (any, error) { + skipGenerated(pass) + + if !typesinternal.Imports(pass.Pkg, "testing") { + return nil, nil + } + + var ( + inspect = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + info = pass.TypesInfo + ) + + // edits computes the text edits for a matched for/range loop + // at the specified cursor. b is the *testing.B value, and + // (start, end) is the portion using b.N to delete. + edits := func(curLoop inspector.Cursor, b ast.Expr, start, end token.Pos) (edits []analysis.TextEdit) { + curFn, _ := enclosingFunc(curLoop) + // Within the same function, delete all calls to + // b.{Start,Stop,Timer} that precede the loop. + filter := []ast.Node{(*ast.ExprStmt)(nil), (*ast.FuncLit)(nil)} + curFn.Inspect(filter, func(cur inspector.Cursor) (descend bool) { + node := cur.Node() + if is[*ast.FuncLit](node) { + return false // don't descend into FuncLits (e.g. sub-benchmarks) + } + stmt := node.(*ast.ExprStmt) + if stmt.Pos() > start { + return false // not preceding: stop + } + if call, ok := stmt.X.(*ast.CallExpr); ok { + obj := typeutil.Callee(info, call) + if typesinternal.IsMethodNamed(obj, "testing", "B", "StopTimer", "StartTimer", "ResetTimer") { + // Delete call statement. + // TODO(adonovan): delete following newline, or + // up to start of next stmt? (May delete a comment.) 
+ edits = append(edits, analysis.TextEdit{ + Pos: stmt.Pos(), + End: stmt.End(), + }) + } + } + return true + }) + + // Replace ...b.N... with b.Loop(). + return append(edits, analysis.TextEdit{ + Pos: start, + End: end, + NewText: fmt.Appendf(nil, "%s.Loop()", astutil.Format(pass.Fset, b)), + }) + } + + // Find all for/range statements. + loops := []ast.Node{ + (*ast.ForStmt)(nil), + (*ast.RangeStmt)(nil), + } + for curFile := range filesUsing(inspect, info, "go1.24") { + for curLoop := range curFile.Preorder(loops...) { + switch n := curLoop.Node().(type) { + case *ast.ForStmt: + // for _; i < b.N; _ {} + if cmp, ok := n.Cond.(*ast.BinaryExpr); ok && cmp.Op == token.LSS { + if sel, ok := cmp.Y.(*ast.SelectorExpr); ok && + sel.Sel.Name == "N" && + typesinternal.IsPointerToNamed(info.TypeOf(sel.X), "testing", "B") && usesBenchmarkNOnce(curLoop, info) { + + delStart, delEnd := n.Cond.Pos(), n.Cond.End() + + // Eliminate variable i if no longer needed: + // for i := 0; i < b.N; i++ { + // ...no references to i... + // } + body, _ := curLoop.LastChild() + if v := isIncrementLoop(info, n); v != nil && + !uses(index, body, v) { + delStart, delEnd = n.Init.Pos(), n.Post.End() + } + + pass.Report(analysis.Diagnostic{ + // Highlight "i < b.N". + Pos: n.Cond.Pos(), + End: n.Cond.End(), + Message: "b.N can be modernized using b.Loop()", + SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Replace b.N with b.Loop()", + TextEdits: edits(curLoop, sel.X, delStart, delEnd), + }}, + }) + } + } + + case *ast.RangeStmt: + // for range b.N {} -> for b.Loop() {} + // + // TODO(adonovan): handle "for i := range b.N". + if sel, ok := n.X.(*ast.SelectorExpr); ok && + n.Key == nil && + n.Value == nil && + sel.Sel.Name == "N" && + typesinternal.IsPointerToNamed(info.TypeOf(sel.X), "testing", "B") && usesBenchmarkNOnce(curLoop, info) { + + pass.Report(analysis.Diagnostic{ + // Highlight "range b.N". + Pos: n.Range, + End: n.X.End(), + Message: "b.N can be modernized using b.Loop()", + SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Replace b.N with b.Loop()", + TextEdits: edits(curLoop, sel.X, n.Range, n.X.End()), + }}, + }) + } + } + } + } + return nil, nil +} + +// uses reports whether the subtree cur contains a use of obj. +func uses(index *typeindex.Index, cur inspector.Cursor, obj types.Object) bool { + for use := range index.Uses(obj) { + if cur.Contains(use) { + return true + } + } + return false +} + +// enclosingFunc returns the cursor for the innermost Func{Decl,Lit} +// that encloses c, if any. +func enclosingFunc(c inspector.Cursor) (inspector.Cursor, bool) { + return moreiters.First(c.Enclosing((*ast.FuncDecl)(nil), (*ast.FuncLit)(nil))) +} + +// usesBenchmarkNOnce reports whether a b.N loop should be modernized to b.Loop(). +// Only modernize loops that are: +// 1. Directly in a benchmark function (not in nested functions) +// - b.Loop() must be called in the same goroutine as the benchmark function +// - Function literals are often used with goroutines (go func(){...}) +// +// 2. 
The only b.N loop in that benchmark function +// - b.Loop() can only be called once per benchmark execution +// - Multiple calls result in "B.Loop called with timer stopped" error +func usesBenchmarkNOnce(c inspector.Cursor, info *types.Info) bool { + // Find the enclosing benchmark function + curFunc, ok := enclosingFunc(c) + if !ok { + return false + } + + // Check if this is actually a benchmark function + fdecl, ok := curFunc.Node().(*ast.FuncDecl) + if !ok { + return false // not in a function; or, inside a FuncLit + } + if !isBenchmarkFunc(fdecl) { + return false + } + + // Count b.N references in this benchmark function + bnRefCount := 0 + filter := []ast.Node{(*ast.SelectorExpr)(nil), (*ast.FuncLit)(nil)} + curFunc.Inspect(filter, func(cur inspector.Cursor) bool { + switch n := cur.Node().(type) { + case *ast.FuncLit: + return false // don't descend into nested function literals + case *ast.SelectorExpr: + if n.Sel.Name == "N" && typesinternal.IsPointerToNamed(info.TypeOf(n.X), "testing", "B") { + bnRefCount++ + } + } + return true + }) + + // Only modernize if there's exactly one b.N reference + return bnRefCount == 1 +} + +// isBenchmarkFunc reports whether f is a benchmark function. +func isBenchmarkFunc(f *ast.FuncDecl) bool { + return f.Recv == nil && + f.Name != nil && + f.Name.IsExported() && + strings.HasPrefix(f.Name.Name, "Benchmark") && + f.Type.Params != nil && + len(f.Type.Params.List) == 1 +} + +// isIncrementLoop reports whether loop has the form "for i := 0; ...; i++ { ... }", +// and if so, it returns the symbol for the index variable. +func isIncrementLoop(info *types.Info, loop *ast.ForStmt) *types.Var { + if assign, ok := loop.Init.(*ast.AssignStmt); ok && + assign.Tok == token.DEFINE && + len(assign.Rhs) == 1 && + isZeroIntLiteral(info, assign.Rhs[0]) && + is[*ast.IncDecStmt](loop.Post) && + loop.Post.(*ast.IncDecStmt).Tok == token.INC && + astutil.EqualSyntax(loop.Post.(*ast.IncDecStmt).X, assign.Lhs[0]) { + return info.Defs[assign.Lhs[0].(*ast.Ident)].(*types.Var) + } + return nil +} diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/doc.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/doc.go new file mode 100644 index 00000000000..bc143d7a6d0 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/doc.go @@ -0,0 +1,455 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package modernize provides a suite of analyzers that suggest +simplifications to Go code, using modern language and library +features. + +Each diagnostic provides a fix. Our intent is that these fixes may +be safely applied en masse without changing the behavior of your +program. In some cases the suggested fixes are imperfect and may +lead to (for example) unused imports or unused local variables, +causing build breakage. However, these problems are generally +trivial to fix. We regard any modernizer whose fix changes program +behavior to have a serious bug and will endeavor to fix it. + +To apply all modernization fixes en masse, you can use the +following command: + + $ go run golang.org/x/tools/go/analysis/passes/modernize/cmd/modernize@latest -fix ./... + +(Do not use "go get -tool" to add gopls as a dependency of your +module; gopls commands must be built from their release branch.) 
+ +If the tool warns of conflicting fixes, you may need to run it more +than once until it has applied all fixes cleanly. This command is +not an officially supported interface and may change in the future. + +Changes produced by this tool should be reviewed as usual before +being merged. In some cases, a loop may be replaced by a simple +function call, causing comments within the loop to be discarded. +Human judgment may be required to avoid losing comments of value. + +The modernize suite contains many analyzers. Diagnostics from some, +such as "any" (which replaces "interface{}" with "any" where it +is safe to do so), are particularly numerous. It may ease the burden of +code review to apply fixes in two steps, the first consisting only of +fixes from the "any" analyzer, the second consisting of all +other analyzers. This can be achieved using flags, as in this example: + + $ modernize -any=true -fix ./... + $ modernize -any=false -fix ./... + +# Analyzer appendclipped + +appendclipped: simplify append chains using slices.Concat + +The appendclipped analyzer suggests replacing chains of append calls with a +single call to slices.Concat, which was added in Go 1.21. For example, +append(append(s, s1...), s2...) would be simplified to slices.Concat(s, s1, s2). + +In the simple case of appending to a newly allocated slice, such as +append([]T(nil), s...), the analyzer suggests the more concise slices.Clone(s). +For byte slices, it will prefer bytes.Clone if the "bytes" package is +already imported. + +This fix is only applied when the base of the append tower is a +"clipped" slice, meaning its length and capacity are equal (e.g. +x[:0:0] or []T{}). This is to avoid changing program behavior by +eliminating intended side effects on the base slice's underlying +array. + +This analyzer is currently disabled by default as the +transformation does not preserve the nilness of the base slice in +all cases; see https://go.dev/issue/73557. + +# Analyzer bloop + +bloop: replace for-range over b.N with b.Loop + +The bloop analyzer suggests replacing benchmark loops of the form +`for i := 0; i < b.N; i++` or `for range b.N` with the more modern +`for b.Loop()`, which was added in Go 1.24. + +This change makes benchmark code more readable and also removes the need for +manual timer control, so any preceding calls to b.StartTimer, b.StopTimer, +or b.ResetTimer within the same function will also be removed. + +Caveats: The b.Loop() method is designed to prevent the compiler from +optimizing away the benchmark loop, which can occasionally result in +slower execution due to increased allocations in some specific cases. + +# Analyzer any + +any: replace interface{} with any + +The any analyzer suggests replacing uses of the empty interface type, +`interface{}`, with the `any` alias, which was introduced in Go 1.18. +This is a purely stylistic change that makes code more readable. + +# Analyzer errorsastype + +errorsastype: replace errors.As with errors.AsType[T] + +This analyzer suggests fixes to simplify uses of [errors.As] of +this form: + + var myerr *MyErr + if errors.As(err, &myerr) { + handle(myerr) + } + +by using the less error-prone generic [errors.AsType] function, +introduced in Go 1.26: + + if myerr, ok := errors.AsType[*MyErr](err); ok { + handle(myerr) + } + +The fix is only offered if the var declaration has the form shown and +there are no uses of myerr outside the if statement. 
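One hedged illustration of the restriction just described, with hypothetical names (MyErr, handle, and err are not from this CL): no fix is offered below, because myerr is used after the if statement and would go out of scope once its declaration is merged into the if header:

	var myerr *MyErr
	if errors.As(err, &myerr) {
		handle(myerr)
	}
	log.Println(myerr) // use outside the if statement: no fix offered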
+ +# Analyzer fmtappendf + +fmtappendf: replace []byte(fmt.Sprintf) with fmt.Appendf + +The fmtappendf analyzer suggests replacing `[]byte(fmt.Sprintf(...))` with +`fmt.Appendf(nil, ...)`. This avoids the intermediate allocation of a string +by Sprintf, making the code more efficient. The suggestion also applies to +fmt.Sprint and fmt.Sprintln. + +# Analyzer forvar + +forvar: remove redundant re-declaration of loop variables + +The forvar analyzer removes unnecessary shadowing of loop variables. +Before Go 1.22, it was common to write `for _, x := range s { x := x ... }` +to create a fresh variable for each iteration. Go 1.22 changed the semantics +of `for` loops, making this pattern redundant. This analyzer removes the +unnecessary `x := x` statement. + +This fix only applies to `range` loops. + +# Analyzer mapsloop + +mapsloop: replace explicit loops over maps with calls to maps package + +The mapsloop analyzer replaces loops of the form + + for k, v := range x { m[k] = v } + +with a single call to a function from the `maps` package, added in Go 1.23. +Depending on the context, this could be `maps.Copy`, `maps.Insert`, +`maps.Clone`, or `maps.Collect`. + +The transformation to `maps.Clone` is applied conservatively, as it +preserves the nilness of the source map, which may be a subtle change in +behavior if the original code did not handle a nil map in the same way. + +# Analyzer minmax + +minmax: replace if/else statements with calls to min or max + +The minmax analyzer simplifies conditional assignments by suggesting the use +of the built-in `min` and `max` functions, introduced in Go 1.21. For example, + + if a < b { x = a } else { x = b } + +is replaced by + + x = min(a, b). + +This analyzer avoids making suggestions for floating-point types, +as the behavior of `min` and `max` with NaN values can differ from +the original if/else statement. + +# Analyzer newexpr + +newexpr: simplify code by using go1.26's new(expr) + +This analyzer finds declarations of functions of this form: + + func varOf(x int) *int { return &x } + +and suggests a fix to turn them into inlinable wrappers around +go1.26's built-in new(expr) function: + + func varOf(x int) *int { return new(x) } + +In addition, this analyzer suggests a fix for each call +to one of the functions before it is transformed, so that + + use(varOf(123)) + +is replaced by: + + use(new(123)) + +(Wrapper functions such as varOf are common when working with Go +serialization packages such as for JSON or protobuf, where pointers +are often used to express optionality.) + +# Analyzer omitzero + +omitzero: suggest replacing omitempty with omitzero for struct fields + +The omitzero analyzer identifies uses of the `omitempty` JSON struct tag on +fields that are themselves structs. The `omitempty` tag has no effect on +struct-typed fields. The analyzer offers two suggestions: either remove the +tag, or replace it with `omitzero` (added in Go 1.24), which correctly +omits the field if the struct value is zero. + +Replacing `omitempty` with `omitzero` is a change in behavior. The +original code would always encode the struct field, whereas the +modified code will omit it if it is a zero-value. 
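A hedged sketch of the omitzero rewrite described above, with invented type and field names:

	type Response struct {
		Meta Metadata `json:"meta,omitempty"` // no effect: Metadata is a struct
	}

	// suggested fix (Go 1.24+):
	type Response struct {
		Meta Metadata `json:"meta,omitzero"` // omitted when Meta is the zero Metadata
	}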
+
+# Analyzer plusbuild
+
+plusbuild: remove obsolete //+build comments
+
+The plusbuild analyzer suggests a fix to remove obsolete build tags
+of the form:
+
+	//+build linux,amd64
+
+in files that also contain a Go 1.18-style tag such as:
+
+	//go:build linux && amd64
+
+(It does not check that the old and new tags are consistent;
+that is the job of the 'buildtag' analyzer in the vet suite.)
+
+# Analyzer rangeint
+
+rangeint: replace 3-clause for loops with for-range over integers
+
+The rangeint analyzer suggests replacing traditional for loops such
+as
+
+	for i := 0; i < n; i++ { ... }
+
+with the more idiomatic Go 1.22 style:
+
+	for i := range n { ... }
+
+This transformation is applied only if (a) the loop variable is not
+modified within the loop body and (b) the loop's limit expression
+is not modified within the loop, as `for range` evaluates its
+operand only once.
+
+# Analyzer reflecttypefor
+
+reflecttypefor: replace reflect.TypeOf(x) with TypeFor[T]()
+
+This analyzer suggests fixes to replace uses of reflect.TypeOf(x) with
+reflect.TypeFor, introduced in go1.22, when the desired runtime type
+is known at compile time, for example:
+
+	reflect.TypeOf(uint32(0))        -> reflect.TypeFor[uint32]()
+	reflect.TypeOf((*ast.File)(nil)) -> reflect.TypeFor[*ast.File]()
+
+It also offers a fix to simplify the construction below, which uses
+reflect.TypeOf to return the runtime type for an interface type,
+
+	reflect.TypeOf((*io.Reader)(nil)).Elem()
+
+to:
+
+	reflect.TypeFor[io.Reader]()
+
+No fix is offered in cases when the runtime type is dynamic, such as:
+
+	var r io.Reader = ...
+	reflect.TypeOf(r)
+
+or when the operand has potential side effects.
+
+# Analyzer slicescontains
+
+slicescontains: replace loops with slices.Contains or slices.ContainsFunc
+
+The slicescontains analyzer simplifies loops that check for the existence of
+an element in a slice. It replaces them with calls to `slices.Contains` or
+`slices.ContainsFunc`, which were added in Go 1.21.
+
+If the expression for the target element has side effects, this
+transformation will cause those effects to occur only once, not
+once per tested slice element.
+
+# Analyzer slicesdelete
+
+slicesdelete: replace append-based slice deletion with slices.Delete
+
+The slicesdelete analyzer suggests replacing the idiom
+
+	s = append(s[:i], s[j:]...)
+
+with the more explicit
+
+	s = slices.Delete(s, i, j)
+
+introduced in Go 1.21.
+
+This analyzer is disabled by default. The `slices.Delete` function
+zeros the elements between the new length and the old length of the
+slice to prevent memory leaks, which is a subtle difference in
+behavior compared to the append-based idiom; see https://go.dev/issue/73686.
+
+# Analyzer slicessort
+
+slicessort: replace sort.Slice with slices.Sort for basic types
+
+The slicessort analyzer simplifies sorting slices of basic ordered
+types. It replaces
+
+	sort.Slice(s, func(i, j int) bool { return s[i] < s[j] })
+
+with the simpler `slices.Sort(s)`, which was added in Go 1.21.
+
+# Analyzer stditerators
+
+stditerators: use iterators instead of Len/At-style APIs
+
+This analyzer suggests a fix to replace each loop of the form:
+
+	for i := 0; i < x.Len(); i++ {
+		use(x.At(i))
+	}
+
+or its "for i := range x.Len()" equivalent by a range loop over an
+iterator offered by the same data type:
+
+	for elem := range x.All() {
+		use(elem)
+	}
+
+where x is one of various well-known types in the standard library.
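As one plausible instance of the pattern above (hedged: the analyzer's actual table of recognized types is not shown in this CL), a loop over go/types' *types.Tuple:

	for i := 0; i < tuple.Len(); i++ {
		use(tuple.At(i))
	}

could be replaced by a range over the Variables() iterator added in Go 1.24:

	for v := range tuple.Variables() {
		use(v)
	}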
+
+# Analyzer stringscutprefix
+
+stringscutprefix: replace HasPrefix/TrimPrefix with CutPrefix
+
+The stringscutprefix analyzer simplifies a common pattern where code first
+checks for a prefix with `strings.HasPrefix` and then removes it with
+`strings.TrimPrefix`. It replaces this two-step process with a single call
+to `strings.CutPrefix`, introduced in Go 1.20. The analyzer also handles
+the equivalent functions in the `bytes` package.
+
+For example, this input:
+
+	if strings.HasPrefix(s, prefix) {
+		use(strings.TrimPrefix(s, prefix))
+	}
+
+is fixed to:
+
+	if after, ok := strings.CutPrefix(s, prefix); ok {
+		use(after)
+	}
+
+The analyzer also offers fixes to use CutSuffix in a similar way.
+This input:
+
+	if strings.HasSuffix(s, suffix) {
+		use(strings.TrimSuffix(s, suffix))
+	}
+
+is fixed to:
+
+	if before, ok := strings.CutSuffix(s, suffix); ok {
+		use(before)
+	}
+
+# Analyzer stringsseq
+
+stringsseq: replace ranging over Split/Fields with SplitSeq/FieldsSeq
+
+The stringsseq analyzer improves the efficiency of iterating over substrings.
+It replaces
+
+	for range strings.Split(...)
+
+with the more efficient
+
+	for range strings.SplitSeq(...)
+
+which was added in Go 1.24 and avoids allocating a slice for the
+substrings. The analyzer also handles strings.Fields and the
+equivalent functions in the bytes package.
+
+# Analyzer stringsbuilder
+
+stringsbuilder: replace += with strings.Builder
+
+This analyzer replaces repeated string += string concatenation
+operations with calls to Go 1.10's strings.Builder.
+
+For example:
+
+	var s = "["
+	for x := range seq {
+		s += x
+		s += "."
+	}
+	s += "]"
+	use(s)
+
+is replaced by:
+
+	var s strings.Builder
+	s.WriteString("[")
+	for x := range seq {
+		s.WriteString(x)
+		s.WriteString(".")
+	}
+	s.WriteString("]")
+	use(s.String())
+
+This avoids quadratic memory allocation and improves performance.
+
+The analyzer requires that all references to s except the final one
+are += operations. To avoid warning about trivial cases, at least one
+of them must appear within a loop. The variable s must be a local
+variable, not a global or parameter.
+
+The sole use of the finished string must be the last reference to the
+variable s. (It may appear within an intervening loop or function literal,
+since even if s.String() is called repeatedly, it does not allocate memory.)
+
+# Analyzer testingcontext
+
+testingcontext: replace context.WithCancel with t.Context in tests
+
+The testingcontext analyzer simplifies context management in tests. It
+replaces the manual creation of a cancellable context,
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+with a single call to t.Context(), which was added in Go 1.24.
+
+This change is only suggested if the `cancel` function is not used
+for any other purpose.
+
+# Analyzer waitgroup
+
+waitgroup: replace wg.Add(1)/go/wg.Done() with wg.Go
+
+The waitgroup analyzer simplifies goroutine management with `sync.WaitGroup`.
+It replaces the common pattern
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		...
+	}()
+
+with a single call to
+
+	wg.Go(func(){ ... })
+
+which was added in Go 1.25.
+*/
+package modernize
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/errorsastype.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/errorsastype.go
new file mode 100644
index 00000000000..b6387ad8406
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/errorsastype.go
@@ -0,0 +1,243 @@
+// Copyright 2025 The Go Authors.
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modernize + +import ( + "go/ast" + "go/token" + "go/types" + + "fmt" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/edge" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysisinternal/generated" + typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/goplsexport" + "golang.org/x/tools/internal/refactor" + "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/typesinternal/typeindex" +) + +var errorsastypeAnalyzer = &analysis.Analyzer{ + Name: "errorsastype", + Doc: analysisinternal.MustExtractDoc(doc, "errorsastype"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#errorsastype", + Requires: []*analysis.Analyzer{generated.Analyzer, typeindexanalyzer.Analyzer}, + Run: errorsastype, +} + +func init() { + // Export to gopls until this is a published modernizer. + goplsexport.ErrorsAsTypeModernizer = errorsastypeAnalyzer +} + +// errorsastype offers a fix to replace error.As with the newer +// errors.AsType[T] following this pattern: +// +// var myerr *MyErr +// if errors.As(err, &myerr) { ... } +// +// => +// +// if myerr, ok := errors.AsType[*MyErr](err); ok { ... } +// +// (In principle several of these can then be chained using if/else, +// but we don't attempt that.) +// +// We offer the fix only within an if statement, but not within a +// switch case such as: +// +// var myerr *MyErr +// switch { +// case errors.As(err, &myerr): +// } +// +// because the transformation in that case would be ungainly. +// +// Note that the cmd/vet suite includes the "errorsas" analyzer, which +// detects actual mistakes in the use of errors.As. This logic does +// not belong in errorsas because the problems it fixes are merely +// stylistic. +// +// TODO(adonovan): support more cases: +// +// - Negative cases +// var myerr E +// if !errors.As(err, &myerr) { ... } +// => +// myerr, ok := errors.AsType[E](err) +// if !ok { ... } +// +// - if myerr := new(E); errors.As(err, myerr); { ... } +// +// - if errors.As(err, myerr) && othercond { ... } +func errorsastype(pass *analysis.Pass) (any, error) { + skipGenerated(pass) + + var ( + index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + info = pass.TypesInfo + ) + + for curCall := range index.Calls(index.Object("errors", "As")) { + call := curCall.Node().(*ast.CallExpr) + if len(call.Args) < 2 { + continue // spread call: errors.As(pair()) + } + + v, curDeclStmt := canUseErrorsAsType(info, index, curCall) + if v == nil { + continue + } + + file := astutil.EnclosingFile(curDeclStmt) + if !fileUses(info, file, "go1.26") { + continue // errors.AsType is too new + } + + // Locate identifier "As" in errors.As. + var asIdent *ast.Ident + switch n := ast.Unparen(call.Fun).(type) { + case *ast.Ident: + asIdent = n // "errors" was dot-imported + case *ast.SelectorExpr: + asIdent = n.Sel + default: + panic("no Ident for errors.As") + } + + // Format the type as valid Go syntax. + // TODO(adonovan): fix: FileQualifier needs to respect + // visibility at the current point, and either fail + // or edit the imports as needed. + // TODO(adonovan): fix: TypeString is not a sound way + // to print types as Go syntax as it does not respect + // symbol visibility, etc. 
We need something loosely + // integrated with FileQualifier that accumulates + // import edits, and may fail (e.g. for unexported + // type or field names from other packages). + // See https://go.dev/issues/75604. + qual := typesinternal.FileQualifier(file, pass.Pkg) + errtype := types.TypeString(v.Type(), qual) + + // Choose a name for the "ok" variable. + okName := "ok" + if okVar := lookup(info, curCall, "ok"); okVar != nil { + // The name 'ok' is already declared, but + // don't choose a fresh name unless okVar + // is also used within the if-statement. + curIf := curCall.Parent() + for curUse := range index.Uses(okVar) { + if curIf.Contains(curUse) { + scope := info.Scopes[curIf.Node().(*ast.IfStmt)] + okName = refactor.FreshName(scope, v.Pos(), "ok") + break + } + } + } + + pass.Report(analysis.Diagnostic{ + Pos: call.Fun.Pos(), + End: call.Fun.End(), + Message: fmt.Sprintf("errors.As can be simplified using AsType[%s]", errtype), + SuggestedFixes: []analysis.SuggestedFix{{ + Message: fmt.Sprintf("Replace errors.As with AsType[%s]", errtype), + TextEdits: append( + // delete "var myerr *MyErr" + refactor.DeleteStmt(pass.Fset.File(call.Fun.Pos()), curDeclStmt), + // if errors.As (err, &myerr) { ... } + // ------------- -------------- -------- ---- + // if myerr, ok := errors.AsType[*MyErr](err ); ok { ... } + analysis.TextEdit{ + // insert "myerr, ok := " + Pos: call.Pos(), + End: call.Pos(), + NewText: fmt.Appendf(nil, "%s, %s := ", v.Name(), okName), + }, + analysis.TextEdit{ + // replace As with AsType[T] + Pos: asIdent.Pos(), + End: asIdent.End(), + NewText: fmt.Appendf(nil, "AsType[%s]", errtype), + }, + analysis.TextEdit{ + // delete ", &myerr" + Pos: call.Args[0].End(), + End: call.Args[1].End(), + }, + analysis.TextEdit{ + // insert "; ok" + Pos: call.End(), + End: call.End(), + NewText: fmt.Appendf(nil, "; %s", okName), + }, + ), + }}, + }) + } + return nil, nil +} + +// canUseErrorsAsType reports whether curCall is a call to +// errors.As beneath an if statement, preceded by a +// declaration of the typed error var. The var must not be +// used outside the if statement. +func canUseErrorsAsType(info *types.Info, index *typeindex.Index, curCall inspector.Cursor) (_ *types.Var, _ inspector.Cursor) { + if !astutil.IsChildOf(curCall, edge.IfStmt_Cond) { + return // not beneath if statement + } + var ( + curIfStmt = curCall.Parent() + ifStmt = curIfStmt.Node().(*ast.IfStmt) + ) + if ifStmt.Init != nil { + return // if statement already has an init part + } + unary, ok := curCall.Node().(*ast.CallExpr).Args[1].(*ast.UnaryExpr) + if !ok || unary.Op != token.AND { + return // 2nd arg is not &var + } + id, ok := unary.X.(*ast.Ident) + if !ok { + return // not a simple ident (local var) + } + v := info.Uses[id].(*types.Var) + curDef, ok := index.Def(v) + if !ok { + return // var is not local (e.g. dot-imported) + } + // Have: if errors.As(err, &v) { ... } + + // Reject if v is used outside (before or after) the + // IfStmt, since that will become its new scope. 
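+	// For example (illustrative), a candidate like this one is
+	// rejected, because myerr is still needed after the if statement:
+	//
+	//	var myerr *MyErr
+	//	if errors.As(err, &myerr) { ... }
+	//	log.Print(myerr) // use outside the IfStmt prevents the fix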
+ for curUse := range index.Uses(v) { + if !curIfStmt.Contains(curUse) { + return // v used before/after if statement + } + } + if !astutil.IsChildOf(curDef, edge.ValueSpec_Names) { + return // v not declared by "var v T" + } + var ( + curSpec = curDef.Parent() // ValueSpec + curDecl = curSpec.Parent() // GenDecl + spec = curSpec.Node().(*ast.ValueSpec) + ) + if len(spec.Names) != 1 || len(spec.Values) != 0 || + len(curDecl.Node().(*ast.GenDecl).Specs) != 1 { + return // not a simple "var v T" decl + } + + // Have: + // var v *MyErr + // ... + // if errors.As(err, &v) { ... } + // with no uses of v outside the IfStmt. + return v, curDecl.Parent() // DeclStmt +} diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/fmtappendf.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/fmtappendf.go new file mode 100644 index 00000000000..f2e53605424 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/fmtappendf.go @@ -0,0 +1,115 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modernize + +import ( + "fmt" + "go/ast" + "go/types" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/edge" + "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysisinternal/generated" + typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/typesinternal/typeindex" +) + +var FmtAppendfAnalyzer = &analysis.Analyzer{ + Name: "fmtappendf", + Doc: analysisinternal.MustExtractDoc(doc, "fmtappendf"), + Requires: []*analysis.Analyzer{ + generated.Analyzer, + inspect.Analyzer, + typeindexanalyzer.Analyzer, + }, + Run: fmtappendf, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#fmtappendf", +} + +// The fmtappend function replaces []byte(fmt.Sprintf(...)) by +// fmt.Appendf(nil, ...), and similarly for Sprint, Sprintln. +func fmtappendf(pass *analysis.Pass) (any, error) { + skipGenerated(pass) + + index := pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + for _, fn := range []types.Object{ + index.Object("fmt", "Sprintf"), + index.Object("fmt", "Sprintln"), + index.Object("fmt", "Sprint"), + } { + for curCall := range index.Calls(fn) { + call := curCall.Node().(*ast.CallExpr) + if ek, idx := curCall.ParentEdge(); ek == edge.CallExpr_Args && idx == 0 { + // Is parent a T(fmt.SprintX(...)) conversion? + conv := curCall.Parent().Node().(*ast.CallExpr) + tv := pass.TypesInfo.Types[conv.Fun] + if tv.IsType() && types.Identical(tv.Type, byteSliceType) && + fileUses(pass.TypesInfo, astutil.EnclosingFile(curCall), "go1.19") { + // Have: []byte(fmt.SprintX(...)) + + // Find "Sprint" identifier. + var id *ast.Ident + switch e := ast.Unparen(call.Fun).(type) { + case *ast.SelectorExpr: + id = e.Sel // "fmt.Sprint" + case *ast.Ident: + id = e // "Sprint" after `import . 
"fmt"` + } + + old, new := fn.Name(), strings.Replace(fn.Name(), "Sprint", "Append", 1) + edits := []analysis.TextEdit{ + { + // delete "[]byte(" + Pos: conv.Pos(), + End: conv.Lparen + 1, + }, + { + // remove ")" + Pos: conv.Rparen, + End: conv.Rparen + 1, + }, + { + Pos: id.Pos(), + End: id.End(), + NewText: []byte(new), + }, + { + Pos: call.Lparen + 1, + NewText: []byte("nil, "), + }, + } + if len(conv.Args) == 1 { + arg := conv.Args[0] + // Determine if we have T(fmt.SprintX(...)). If so, delete the non-args + // that come before the right parenthesis. Leaving an + // extra comma here produces invalid code. (See + // golang/go#74709) + if arg.End() < conv.Rparen { + edits = append(edits, analysis.TextEdit{ + Pos: arg.End(), + End: conv.Rparen, + }) + } + } + pass.Report(analysis.Diagnostic{ + Pos: conv.Pos(), + End: conv.End(), + Message: fmt.Sprintf("Replace []byte(fmt.%s...) with fmt.%s", old, new), + SuggestedFixes: []analysis.SuggestedFix{{ + Message: fmt.Sprintf("Replace []byte(fmt.%s...) with fmt.%s", old, new), + TextEdits: edits, + }}, + }) + } + } + } + } + return nil, nil +} diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/forvar.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/forvar.go new file mode 100644 index 00000000000..76e3a8a73c2 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/forvar.go @@ -0,0 +1,94 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modernize + +import ( + "go/ast" + "go/token" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysisinternal/generated" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/refactor" +) + +var ForVarAnalyzer = &analysis.Analyzer{ + Name: "forvar", + Doc: analysisinternal.MustExtractDoc(doc, "forvar"), + Requires: []*analysis.Analyzer{ + generated.Analyzer, + inspect.Analyzer, + }, + Run: forvar, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#forvar", +} + +// forvar offers to fix unnecessary copying of a for variable +// +// for _, x := range foo { +// x := x // offer to remove this superfluous assignment +// } +// +// Prerequisites: +// First statement in a range loop has to be := +// where the two idents are the same, +// and the ident is defined (:=) as a variable in the for statement. 
+// (Note that this 'fix' does not work for three clause loops +// because the Go specification says "The variable used by each subsequent iteration +// is declared implicitly before executing the post statement and initialized to the +// value of the previous iteration's variable at that moment.") +func forvar(pass *analysis.Pass) (any, error) { + skipGenerated(pass) + + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + for curFile := range filesUsing(inspect, pass.TypesInfo, "go1.22") { + for curLoop := range curFile.Preorder((*ast.RangeStmt)(nil)) { + loop := curLoop.Node().(*ast.RangeStmt) + if loop.Tok != token.DEFINE { + continue + } + isLoopVarRedecl := func(assign *ast.AssignStmt) bool { + for i, lhs := range assign.Lhs { + if !(astutil.EqualSyntax(lhs, assign.Rhs[i]) && + (astutil.EqualSyntax(lhs, loop.Key) || astutil.EqualSyntax(lhs, loop.Value))) { + return false + } + } + return true + } + // Have: for k, v := range x { stmts } + // + // Delete the prefix of stmts that are + // of the form k := k; v := v; k, v := k, v; v, k := v, k. + for _, stmt := range loop.Body.List { + if assign, ok := stmt.(*ast.AssignStmt); ok && + assign.Tok == token.DEFINE && + len(assign.Lhs) == len(assign.Rhs) && + isLoopVarRedecl(assign) { + + curStmt, _ := curLoop.FindNode(stmt) + edits := refactor.DeleteStmt(pass.Fset.File(stmt.Pos()), curStmt) + if len(edits) > 0 { + pass.Report(analysis.Diagnostic{ + Pos: stmt.Pos(), + End: stmt.End(), + Message: "copying variable is unneeded", + SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Remove unneeded redeclaration", + TextEdits: edits, + }}, + }) + } + } else { + break // stop at first other statement + } + } + } + } + return nil, nil +} diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/maps.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/maps.go new file mode 100644 index 00000000000..3072cf6f515 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/maps.go @@ -0,0 +1,280 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modernize + +// This file defines modernizers that use the "maps" package. + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysisinternal/generated" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/refactor" + "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/typesinternal" +) + +var MapsLoopAnalyzer = &analysis.Analyzer{ + Name: "mapsloop", + Doc: analysisinternal.MustExtractDoc(doc, "mapsloop"), + Requires: []*analysis.Analyzer{ + generated.Analyzer, + inspect.Analyzer, + }, + Run: mapsloop, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#mapsloop", +} + +// The mapsloop pass offers to simplify a loop of map insertions: +// +// for k, v := range x { +// m[k] = v +// } +// +// by a call to go1.23's maps package. 
There are four variants, the
+// product of two axes: whether the source x is a map or an iter.Seq2,
+// and whether the destination m is a newly created map:
+//
+//	maps.Copy(m, x)		(x is map)
+//	maps.Insert(m, x)	(x is iter.Seq2)
+//	m = maps.Clone(x)	(x is a non-nil map, m is a new map)
+//	m = maps.Collect(x)	(x is iter.Seq2, m is a new map)
+//
+// A map is newly created if the preceding statement has one of these
+// forms, where M is a map type:
+//
+//	m = make(M)
+//	m = M{}
func mapsloop(pass *analysis.Pass) (any, error) {
+	skipGenerated(pass)
+
+	// Skip the analyzer in packages where its
+	// fixes would create an import cycle.
+	if within(pass, "maps", "bytes", "runtime") {
+		return nil, nil
+	}
+
+	info := pass.TypesInfo
+
+	// check is called for each statement of this form:
+	//	for k, v := range x { m[k] = v }
+	check := func(file *ast.File, curRange inspector.Cursor, assign *ast.AssignStmt, m, x ast.Expr) {
+
+		// Is x a map or iter.Seq2?
+		tx := types.Unalias(info.TypeOf(x))
+		var xmap bool
+		switch typeparams.CoreType(tx).(type) {
+		case *types.Map:
+			xmap = true
+
+		case *types.Signature:
+			k, v, ok := assignableToIterSeq2(tx)
+			if !ok {
+				return // a named isomer of Seq2
+			}
+			xmap = false
+
+			// Record in tx the unnamed map[K]V type
+			// derived from the yield function.
+			// This is the type of maps.Collect(x).
+			tx = types.NewMap(k, v)
+
+		default:
+			return // e.g. slice, channel (or no core type!)
+		}
+
+		// Is the preceding statement of the form
+		//	m = make(M) or M{}
+		// and can we replace its RHS with maps.{Clone,Collect}?
+		//
+		// Beware: if x may be nil, we cannot use Clone as it preserves nilness.
+		var mrhs ast.Expr // make(M) or M{}, or nil
+		if curPrev, ok := curRange.PrevSibling(); ok {
+			if assign, ok := curPrev.Node().(*ast.AssignStmt); ok &&
+				len(assign.Lhs) == 1 &&
+				len(assign.Rhs) == 1 &&
+				astutil.EqualSyntax(assign.Lhs[0], m) {
+
+				// Have: m = rhs; for k, v := range x { m[k] = v }
+				var newMap bool
+				rhs := assign.Rhs[0]
+				switch rhs := ast.Unparen(rhs).(type) {
+				case *ast.CallExpr:
+					if id, ok := ast.Unparen(rhs.Fun).(*ast.Ident); ok &&
+						info.Uses[id] == builtinMake {
+						// Have: m = make(...)
+						newMap = true
+					}
+				case *ast.CompositeLit:
+					if len(rhs.Elts) == 0 {
+						// Have: m = M{}
+						newMap = true
+					}
+				}
+
+				// Take care not to change type of m's RHS expression.
+				if newMap {
+					trhs := info.TypeOf(rhs)
+
+					// Inv: tx is the type of maps.F(x)
+					// - maps.Clone(x) has the same type as x.
+					// - maps.Collect(x) returns an unnamed map type.
+
+					if assign.Tok == token.DEFINE {
+						// DEFINE (:=): we must not
+						// change the type of RHS.
+						if types.Identical(tx, trhs) {
+							mrhs = rhs
+						}
+					} else {
+						// ASSIGN (=): the types of LHS
+						// and RHS may differ in namedness.
+						if types.AssignableTo(tx, trhs) {
+							mrhs = rhs
+						}
+					}
+
+					// Temporarily disable the transformation to the
+					// (nil-preserving) maps.Clone until we can prove
+					// that x is non-nil. This is rarely possible,
+					// and may require control flow analysis
+					// (e.g. a dominating "if len(x)" check).
+					// See #71844.
+					if xmap {
+						mrhs = nil
+					}
+				}
+			}
+		}
+
+		// Choose function.
+		var funcName string
+		if mrhs != nil {
+			funcName = cond(xmap, "Clone", "Collect")
+		} else {
+			funcName = cond(xmap, "Copy", "Insert")
+		}
+
+		// Report diagnostic, and suggest fix. 
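+		// (Illustrative note on the maps.Clone case disabled above:
+		// Clone preserves nilness, so for a nil source x,
+		//
+		//	m := make(map[K]V); for k, v := range x { m[k] = v }
+		//
+		// leaves m non-nil and empty, whereas m := maps.Clone(x)
+		// would leave m nil, an observable behavior change.)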
+		rng := curRange.Node()
+		prefix, importEdits := refactor.AddImport(info, file, "maps", "maps", funcName, rng.Pos())
+		var (
+			newText    []byte
+			start, end token.Pos
+		)
+		if mrhs != nil {
+			// Replace assignment and loop with expression.
+			//
+			//	m = make(...)
+			//	for k, v := range x { /* comments */ m[k] = v }
+			//
+			// ->
+			//
+			//	/* comments */
+			//	m = maps.Copy(x)
+			curPrev, _ := curRange.PrevSibling()
+			start, end = curPrev.Node().Pos(), rng.End()
+			newText = fmt.Appendf(nil, "%s%s = %s%s(%s)",
+				allComments(file, start, end),
+				astutil.Format(pass.Fset, m),
+				prefix,
+				funcName,
+				astutil.Format(pass.Fset, x))
+		} else {
+			// Replace loop with call statement.
+			//
+			//	for k, v := range x { /* comments */ m[k] = v }
+			//
+			// ->
+			//
+			//	/* comments */
+			//	maps.Copy(m, x)
+			start, end = rng.Pos(), rng.End()
+			newText = fmt.Appendf(nil, "%s%s%s(%s, %s)",
+				allComments(file, start, end),
+				prefix,
+				funcName,
+				astutil.Format(pass.Fset, m),
+				astutil.Format(pass.Fset, x))
+		}
+		pass.Report(analysis.Diagnostic{
+			Pos:     assign.Lhs[0].Pos(),
+			End:     assign.Lhs[0].End(),
+			Message: "Replace m[k]=v loop with maps." + funcName,
+			SuggestedFixes: []analysis.SuggestedFix{{
+				Message: "Replace m[k]=v loop with maps." + funcName,
+				TextEdits: append(importEdits, []analysis.TextEdit{{
+					Pos:     start,
+					End:     end,
+					NewText: newText,
+				}}...),
+			}},
+		})
+
+	}
+
+	// Find all range loops around m[k] = v.
+	inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+	for curFile := range filesUsing(inspect, pass.TypesInfo, "go1.23") {
+		file := curFile.Node().(*ast.File)
+
+		for curRange := range curFile.Preorder((*ast.RangeStmt)(nil)) {
+			rng := curRange.Node().(*ast.RangeStmt)
+
+			if rng.Tok == token.DEFINE &&
+				rng.Key != nil &&
+				rng.Value != nil &&
+				isAssignBlock(rng.Body) {
+				// Have: for k, v := range x { lhs = rhs }
+
+				assign := rng.Body.List[0].(*ast.AssignStmt)
+				if index, ok := assign.Lhs[0].(*ast.IndexExpr); ok &&
+					astutil.EqualSyntax(rng.Key, index.Index) &&
+					astutil.EqualSyntax(rng.Value, assign.Rhs[0]) &&
+					is[*types.Map](typeparams.CoreType(info.TypeOf(index.X))) &&
+					types.Identical(info.TypeOf(index), info.TypeOf(rng.Value)) { // m[k], v
+
+					// Have: for k, v := range x { m[k] = v }
+					// where there is no implicit conversion.
+					check(file, curRange, assign, index.X, rng.X)
+				}
+			}
+		}
+	}
+	return nil, nil
+}
+
+// assignableToIterSeq2 reports whether t is assignable to
+// iter.Seq2[K, V] and returns K and V if so.
+func assignableToIterSeq2(t types.Type) (k, v types.Type, ok bool) {
+	// The only named type assignable to iter.Seq2 is iter.Seq2.
+	if is[*types.Named](t) {
+		if !typesinternal.IsTypeNamed(t, "iter", "Seq2") {
+			return
+		}
+		t = t.Underlying()
+	}
+
+	if t, ok := t.(*types.Signature); ok {
+		// func(yield func(K, V) bool)? 
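+		// (Illustrative: for iter.Seq2[string, int] the underlying
+		// signature checked below is func(yield func(string, int) bool).)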
+		if t.Params().Len() == 1 && t.Results().Len() == 0 {
+			if yield, ok := t.Params().At(0).Type().(*types.Signature); ok { // sic, no Underlying/CoreType
+				if yield.Params().Len() == 2 &&
+					yield.Results().Len() == 1 &&
+					types.Identical(yield.Results().At(0).Type(), builtinBool.Type()) {
+					return yield.Params().At(0).Type(), yield.Params().At(1).Type(), true
+				}
+			}
+		}
+	}
+	return
+}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/minmax.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/minmax.go
new file mode 100644
index 00000000000..7ebf837375a
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/minmax.go
@@ -0,0 +1,440 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modernize
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+	"go/types"
+	"strings"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/inspect"
+	"golang.org/x/tools/go/ast/edge"
+	"golang.org/x/tools/go/ast/inspector"
+	"golang.org/x/tools/internal/analysisinternal"
+	"golang.org/x/tools/internal/analysisinternal/generated"
+	typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex"
+	"golang.org/x/tools/internal/astutil"
+	"golang.org/x/tools/internal/typeparams"
+	"golang.org/x/tools/internal/typesinternal/typeindex"
+)
+
+var MinMaxAnalyzer = &analysis.Analyzer{
+	Name: "minmax",
+	Doc:  analysisinternal.MustExtractDoc(doc, "minmax"),
+	Requires: []*analysis.Analyzer{
+		generated.Analyzer,
+		inspect.Analyzer,
+		typeindexanalyzer.Analyzer,
+	},
+	Run: minmax,
+	URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#minmax",
+}
+
+// The minmax pass replaces if/else statements with calls to min or max,
+// and removes user-defined min/max functions that are equivalent to built-ins.
+//
+// If/else replacement patterns:
+//
+//  1. if a < b { x = a } else { x = b }	=>	x = min(a, b)
+//  2. x = a; if a < b { x = b }		=>	x = max(a, b)
+//
+// Pattern 1 requires that a is not NaN, and pattern 2 requires that b
+// is not NaN. Since this is hard to prove, we reject floating-point
+// numbers.
+//
+// Function removal:
+// User-defined min/max functions are suggested for removal if they may
+// be safely replaced by their built-in namesake.
+//
+// Variants:
+// - all four ordered comparisons
+// - "x := a" or "x = a" or "var x = a" in pattern 2
+// - "x < b" or "a < b" in pattern 2
func minmax(pass *analysis.Pass) (any, error) {
+	skipGenerated(pass)
+
+	// Check for user-defined min/max functions that can be removed
+	checkUserDefinedMinMax(pass)
+
+	// check is called for all statements of this form:
+	//	if a < b { lhs = rhs }
+	check := func(file *ast.File, curIfStmt inspector.Cursor, compare *ast.BinaryExpr) {
+		var (
+			ifStmt  = curIfStmt.Node().(*ast.IfStmt)
+			tassign = ifStmt.Body.List[0].(*ast.AssignStmt)
+			a       = compare.X
+			b       = compare.Y
+			lhs     = tassign.Lhs[0]
+			rhs     = tassign.Rhs[0]
+			sign    = isInequality(compare.Op)
+
+			// callArg formats a call argument, preserving comments from [start-end). 
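+			// (Illustrative: for the second argument b, callArg emits a
+			// leading ", ", then any intervening comments on their own
+			// line, then the formatted argument text.)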
+ callArg = func(arg ast.Expr, start, end token.Pos) string { + comments := allComments(file, start, end) + return cond(arg == b, ", ", "") + // second argument needs a comma + cond(comments != "", "\n", "") + // comments need their own line + comments + + astutil.Format(pass.Fset, arg) + } + ) + + if fblock, ok := ifStmt.Else.(*ast.BlockStmt); ok && isAssignBlock(fblock) { + fassign := fblock.List[0].(*ast.AssignStmt) + + // Have: if a < b { lhs = rhs } else { lhs2 = rhs2 } + lhs2 := fassign.Lhs[0] + rhs2 := fassign.Rhs[0] + + // For pattern 1, check that: + // - lhs = lhs2 + // - {rhs,rhs2} = {a,b} + if astutil.EqualSyntax(lhs, lhs2) { + if astutil.EqualSyntax(rhs, a) && astutil.EqualSyntax(rhs2, b) { + sign = +sign + } else if astutil.EqualSyntax(rhs2, a) && astutil.EqualSyntax(rhs, b) { + sign = -sign + } else { + return + } + + sym := cond(sign < 0, "min", "max") + + if !is[*types.Builtin](lookup(pass.TypesInfo, curIfStmt, sym)) { + return // min/max function is shadowed + } + + // pattern 1 + // + // TODO(adonovan): if lhs is declared "var lhs T" on preceding line, + // simplify the whole thing to "lhs := min(a, b)". + pass.Report(analysis.Diagnostic{ + // Highlight the condition a < b. + Pos: compare.Pos(), + End: compare.End(), + Message: fmt.Sprintf("if/else statement can be modernized using %s", sym), + SuggestedFixes: []analysis.SuggestedFix{{ + Message: fmt.Sprintf("Replace if statement with %s", sym), + TextEdits: []analysis.TextEdit{{ + // Replace IfStmt with lhs = min(a, b). + Pos: ifStmt.Pos(), + End: ifStmt.End(), + NewText: fmt.Appendf(nil, "%s = %s(%s%s)", + astutil.Format(pass.Fset, lhs), + sym, + callArg(a, ifStmt.Pos(), ifStmt.Else.Pos()), + callArg(b, ifStmt.Else.Pos(), ifStmt.End()), + ), + }}, + }}, + }) + } + + } else if prev, ok := curIfStmt.PrevSibling(); ok && isSimpleAssign(prev.Node()) && ifStmt.Else == nil { + fassign := prev.Node().(*ast.AssignStmt) + + // Have: lhs0 = rhs0; if a < b { lhs = rhs } + // + // For pattern 2, check that + // - lhs = lhs0 + // - {a,b} = {rhs,rhs0} or {rhs,lhs0} + // The replacement must use rhs0 not lhs0 though. + // For example, we accept this variant: + // lhs = x; if lhs < y { lhs = y } => lhs = min(x, y), not min(lhs, y) + // + // TODO(adonovan): accept "var lhs0 = rhs0" form too. + lhs0 := fassign.Lhs[0] + rhs0 := fassign.Rhs[0] + + if astutil.EqualSyntax(lhs, lhs0) { + if astutil.EqualSyntax(rhs, a) && (astutil.EqualSyntax(rhs0, b) || astutil.EqualSyntax(lhs0, b)) { + sign = +sign + } else if (astutil.EqualSyntax(rhs0, a) || astutil.EqualSyntax(lhs0, a)) && astutil.EqualSyntax(rhs, b) { + sign = -sign + } else { + return + } + sym := cond(sign < 0, "min", "max") + + if !is[*types.Builtin](lookup(pass.TypesInfo, curIfStmt, sym)) { + return // min/max function is shadowed + } + + // Permit lhs0 to stand for rhs0 in the matching, + // but don't actually reduce to lhs0 = min(lhs0, rhs) + // since the "=" could be a ":=". Use min(rhs0, rhs). + if astutil.EqualSyntax(lhs0, a) { + a = rhs0 + } else if astutil.EqualSyntax(lhs0, b) { + b = rhs0 + } + + // pattern 2 + pass.Report(analysis.Diagnostic{ + // Highlight the condition a < b. + Pos: compare.Pos(), + End: compare.End(), + Message: fmt.Sprintf("if statement can be modernized using %s", sym), + SuggestedFixes: []analysis.SuggestedFix{{ + Message: fmt.Sprintf("Replace if/else with %s", sym), + TextEdits: []analysis.TextEdit{{ + Pos: fassign.Pos(), + End: ifStmt.End(), + // Replace "x := a; if ... {}" with "x = min(...)", preserving comments. 
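+						// (Illustrative: "x := a; if x < b { x = b }"
+						// becomes "x := max(a, b)".)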
+						NewText: fmt.Appendf(nil, "%s %s %s(%s%s)",
+							astutil.Format(pass.Fset, lhs),
+							fassign.Tok.String(),
+							sym,
+							callArg(a, fassign.Pos(), ifStmt.Pos()),
+							callArg(b, ifStmt.Pos(), ifStmt.End()),
+						),
+					}},
+				}},
+			})
+		}
+		}
+	}
+
+	// Find all "if a < b { lhs = rhs }" statements.
+	info := pass.TypesInfo
+	inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+	for curFile := range filesUsing(inspect, info, "go1.21") {
+		astFile := curFile.Node().(*ast.File)
+		for curIfStmt := range curFile.Preorder((*ast.IfStmt)(nil)) {
+			ifStmt := curIfStmt.Node().(*ast.IfStmt)
+
+			// Don't bother handling "if a < b { lhs = rhs }" when it appears
+			// as the "else" branch of another if-statement.
+			//    if cond { ... } else if a < b { lhs = rhs }
+			// (This case would require introducing another block
+			//    if cond { ... } else { if a < b { lhs = rhs } }
+			// and checking that there is no following "else".)
+			if astutil.IsChildOf(curIfStmt, edge.IfStmt_Else) {
+				continue
+			}
+
+			if compare, ok := ifStmt.Cond.(*ast.BinaryExpr); ok &&
+				ifStmt.Init == nil &&
+				isInequality(compare.Op) != 0 &&
+				isAssignBlock(ifStmt.Body) {
+				// a blank var has no type.
+				if tLHS := info.TypeOf(ifStmt.Body.List[0].(*ast.AssignStmt).Lhs[0]); tLHS != nil && !maybeNaN(tLHS) {
+					// Have: if a < b { lhs = rhs }
+					check(astFile, curIfStmt, compare)
+				}
+			}
+		}
+	}
+	return nil, nil
+}
+
+// allComments collects all the comments from start to end.
+func allComments(file *ast.File, start, end token.Pos) string {
+	var buf strings.Builder
+	for co := range astutil.Comments(file, start, end) {
+		_, _ = fmt.Fprintf(&buf, "%s\n", co.Text)
+	}
+	return buf.String()
+}
+
+// isInequality reports non-zero if tok is one of < <= >= >:
+// +1 for > and -1 for <.
+func isInequality(tok token.Token) int {
+	switch tok {
+	case token.LEQ, token.LSS:
+		return -1
+	case token.GEQ, token.GTR:
+		return +1
+	}
+	return 0
+}
+
+// isAssignBlock reports whether b is a block of the form { lhs = rhs }.
+func isAssignBlock(b *ast.BlockStmt) bool {
+	if len(b.List) != 1 {
+		return false
+	}
+	// Inv: the sole statement cannot be { lhs := rhs }.
+	return isSimpleAssign(b.List[0])
+}
+
+// isSimpleAssign reports whether n has the form "lhs = rhs" or "lhs := rhs".
+func isSimpleAssign(n ast.Node) bool {
+	assign, ok := n.(*ast.AssignStmt)
+	return ok &&
+		(assign.Tok == token.ASSIGN || assign.Tok == token.DEFINE) &&
+		len(assign.Lhs) == 1 &&
+		len(assign.Rhs) == 1
+}
+
+// maybeNaN reports whether t is (or may be) a floating-point type.
+func maybeNaN(t types.Type) bool {
+	// For now, we rely on core types.
+	// TODO(adonovan): In the post-core-types future,
+	// follow the approach of types.Checker.applyTypeFunc.
+	t = typeparams.CoreType(t)
+	if t == nil {
+		return true // fail safe
+	}
+	if basic, ok := t.(*types.Basic); ok && basic.Info()&types.IsFloat != 0 {
+		return true
+	}
+	return false
+}
+
+// checkUserDefinedMinMax looks for user-defined min/max functions that are
+// equivalent to the built-in functions and suggests removing them. 
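+//
+// For example (illustrative), this helper may be replaced by the
+// built-in min and is reported as removable:
+//
+//	func min(a, b int) int {
+//		if a < b {
+//			return a
+//		}
+//		return b
+//	}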
+func checkUserDefinedMinMax(pass *analysis.Pass) { + index := pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + + // Look up min and max functions by name in package scope + for _, funcName := range []string{"min", "max"} { + if fn, ok := pass.Pkg.Scope().Lookup(funcName).(*types.Func); ok { + // Use typeindex to get the FuncDecl directly + if def, ok := index.Def(fn); ok { + decl := def.Parent().Node().(*ast.FuncDecl) + // Check if this function matches the built-in min/max signature and behavior + if canUseBuiltinMinMax(fn, decl.Body) { + // Expand to include leading doc comment + pos := decl.Pos() + if docs := astutil.DocComment(decl); docs != nil { + pos = docs.Pos() + } + + pass.Report(analysis.Diagnostic{ + Pos: decl.Pos(), + End: decl.End(), + Message: fmt.Sprintf("user-defined %s function is equivalent to built-in %s and can be removed", funcName, funcName), + SuggestedFixes: []analysis.SuggestedFix{{ + Message: fmt.Sprintf("Remove user-defined %s function", funcName), + TextEdits: []analysis.TextEdit{{ + Pos: pos, + End: decl.End(), + }}, + }}, + }) + } + } + } + } +} + +// canUseBuiltinMinMax reports whether it is safe to replace a call +// to this min or max function by its built-in namesake. +func canUseBuiltinMinMax(fn *types.Func, body *ast.BlockStmt) bool { + sig := fn.Type().(*types.Signature) + + // Only consider the most common case: exactly 2 parameters + if sig.Params().Len() != 2 { + return false + } + + // Check if any parameter might be floating-point + for param := range sig.Params().Variables() { + if maybeNaN(param.Type()) { + return false // Don't suggest removal for float types due to NaN handling + } + } + + // Must have exactly one return value + if sig.Results().Len() != 1 { + return false + } + + // Check that the function body implements the expected min/max logic + if body == nil { + return false + } + + return hasMinMaxLogic(body, fn.Name()) +} + +// hasMinMaxLogic checks if the function body implements simple min/max logic. +func hasMinMaxLogic(body *ast.BlockStmt, funcName string) bool { + // Pattern 1: Single if/else statement + if len(body.List) == 1 { + if ifStmt, ok := body.List[0].(*ast.IfStmt); ok { + // Get the "false" result from the else block + if elseBlock, ok := ifStmt.Else.(*ast.BlockStmt); ok && len(elseBlock.List) == 1 { + if elseRet, ok := elseBlock.List[0].(*ast.ReturnStmt); ok && len(elseRet.Results) == 1 { + return checkMinMaxPattern(ifStmt, elseRet.Results[0], funcName) + } + } + } + } + + // Pattern 2: if statement followed by return + if len(body.List) == 2 { + if ifStmt, ok := body.List[0].(*ast.IfStmt); ok && ifStmt.Else == nil { + if retStmt, ok := body.List[1].(*ast.ReturnStmt); ok && len(retStmt.Results) == 1 { + return checkMinMaxPattern(ifStmt, retStmt.Results[0], funcName) + } + } + } + + return false +} + +// checkMinMaxPattern checks if an if statement implements min/max logic. 
+// ifStmt: the if statement to check +// falseResult: the expression returned when the condition is false +// funcName: "min" or "max" +func checkMinMaxPattern(ifStmt *ast.IfStmt, falseResult ast.Expr, funcName string) bool { + // Must have condition with comparison + cmp, ok := ifStmt.Cond.(*ast.BinaryExpr) + if !ok { + return false + } + + // Check if then branch returns one of the compared values + if len(ifStmt.Body.List) != 1 { + return false + } + + thenRet, ok := ifStmt.Body.List[0].(*ast.ReturnStmt) + if !ok || len(thenRet.Results) != 1 { + return false + } + + // Use the same logic as the existing minmax analyzer + sign := isInequality(cmp.Op) + if sign == 0 { + return false // Not a comparison operator + } + + t := thenRet.Results[0] // "true" result + f := falseResult // "false" result + x := cmp.X // left operand + y := cmp.Y // right operand + + // Check operand order and adjust sign accordingly + if astutil.EqualSyntax(t, x) && astutil.EqualSyntax(f, y) { + sign = +sign + } else if astutil.EqualSyntax(t, y) && astutil.EqualSyntax(f, x) { + sign = -sign + } else { + return false + } + + // Check if the sign matches the function name + return cond(sign < 0, "min", "max") == funcName +} + +// -- utils -- + +func is[T any](x any) bool { + _, ok := x.(T) + return ok +} + +func cond[T any](cond bool, t, f T) T { + if cond { + return t + } else { + return f + } +} diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/modernize.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/modernize.go new file mode 100644 index 00000000000..df23fc23c8e --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/modernize.go @@ -0,0 +1,162 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modernize + +import ( + _ "embed" + "go/ast" + "go/constant" + "go/format" + "go/token" + "go/types" + "iter" + "regexp" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/edge" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysisinternal/generated" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/moreiters" + "golang.org/x/tools/internal/packagepath" + "golang.org/x/tools/internal/stdlib" + "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/versions" +) + +//go:embed doc.go +var doc string + +// Suite lists all modernize analyzers. +var Suite = []*analysis.Analyzer{ + AnyAnalyzer, + // AppendClippedAnalyzer, // not nil-preserving! + BLoopAnalyzer, + FmtAppendfAnalyzer, + ForVarAnalyzer, + MapsLoopAnalyzer, + MinMaxAnalyzer, + NewExprAnalyzer, + OmitZeroAnalyzer, + plusBuildAnalyzer, + RangeIntAnalyzer, + ReflectTypeForAnalyzer, + SlicesContainsAnalyzer, + // SlicesDeleteAnalyzer, // not nil-preserving! + SlicesSortAnalyzer, + stditeratorsAnalyzer, + StringsCutPrefixAnalyzer, + StringsSeqAnalyzer, + StringsBuilderAnalyzer, + TestingContextAnalyzer, + WaitGroupAnalyzer, +} + +// -- helpers -- + +// skipGenerated decorates pass.Report to suppress diagnostics in generated files. +func skipGenerated(pass *analysis.Pass) { + report := pass.Report + pass.Report = func(diag analysis.Diagnostic) { + generated := pass.ResultOf[generated.Analyzer].(*generated.Result) + if generated.IsGenerated(diag.Pos) { + return // skip + } + report(diag) + } +} + +// formatExprs formats a comma-separated list of expressions. 
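+// (Illustrative: given the expressions x and y+1, the result is "x, y+1".)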
+func formatExprs(fset *token.FileSet, exprs []ast.Expr) string { + var buf strings.Builder + for i, e := range exprs { + if i > 0 { + buf.WriteString(", ") + } + format.Node(&buf, fset, e) // ignore errors + } + return buf.String() +} + +// isZeroIntLiteral reports whether e is an integer whose value is 0. +func isZeroIntLiteral(info *types.Info, e ast.Expr) bool { + return isIntLiteral(info, e, 0) +} + +// isIntLiteral reports whether e is an integer with given value. +func isIntLiteral(info *types.Info, e ast.Expr, n int64) bool { + return info.Types[e].Value == constant.MakeInt64(n) +} + +// filesUsing returns a cursor for each *ast.File in the inspector +// that uses at least the specified version of Go (e.g. "go1.24"). +// +// TODO(adonovan): opt: eliminate this function, instead following the +// approach of [fmtappendf], which uses typeindex and [fileUses]. +// See "Tip" at [fileUses] for motivation. +func filesUsing(inspect *inspector.Inspector, info *types.Info, version string) iter.Seq[inspector.Cursor] { + return func(yield func(inspector.Cursor) bool) { + for curFile := range inspect.Root().Children() { + file := curFile.Node().(*ast.File) + if !versions.Before(info.FileVersions[file], version) && !yield(curFile) { + break + } + } + } +} + +// fileUses reports whether the specified file uses at least the +// specified version of Go (e.g. "go1.24"). +// +// Tip: we recommend using this check "late", just before calling +// pass.Report, rather than "early" (when entering each ast.File, or +// each candidate node of interest, during the traversal), because the +// operation is not free, yet is not a highly selective filter: the +// fraction of files that pass most version checks is high and +// increases over time. +func fileUses(info *types.Info, file *ast.File, version string) bool { + return !versions.Before(info.FileVersions[file], version) +} + +// within reports whether the current pass is analyzing one of the +// specified standard packages or their dependencies. +func within(pass *analysis.Pass, pkgs ...string) bool { + path := pass.Pkg.Path() + return packagepath.IsStdPackage(path) && + moreiters.Contains(stdlib.Dependencies(pkgs...), path) +} + +// unparenEnclosing removes enclosing parens from cur in +// preparation for a call to [Cursor.ParentEdge]. +func unparenEnclosing(cur inspector.Cursor) inspector.Cursor { + for astutil.IsChildOf(cur, edge.ParenExpr_X) { + cur = cur.Parent() + } + return cur +} + +var ( + builtinAny = types.Universe.Lookup("any") + builtinAppend = types.Universe.Lookup("append") + builtinBool = types.Universe.Lookup("bool") + builtinInt = types.Universe.Lookup("int") + builtinFalse = types.Universe.Lookup("false") + builtinLen = types.Universe.Lookup("len") + builtinMake = types.Universe.Lookup("make") + builtinNew = types.Universe.Lookup("new") + builtinNil = types.Universe.Lookup("nil") + builtinString = types.Universe.Lookup("string") + builtinTrue = types.Universe.Lookup("true") + byteSliceType = types.NewSlice(types.Typ[types.Byte]) + omitemptyRegex = regexp.MustCompile(`(?:^json| json):"[^"]*(,omitempty)(?:"|,[^"]*")\s?`) +) + +// lookup returns the symbol denoted by name at the position of the cursor. 
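+// (Illustrative: lookup(info, cur, "new") yields the universe's
+// built-in new, unless an enclosing declaration shadows the name,
+// in which case the shadowing object is returned.)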
+func lookup(info *types.Info, cur inspector.Cursor, name string) types.Object { + scope := typesinternal.EnclosingScope(info, cur) + _, obj := scope.LookupParent(name, cur.Node().Pos()) + return obj +} diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/newexpr.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/newexpr.go new file mode 100644 index 00000000000..b8893244d51 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/newexpr.go @@ -0,0 +1,208 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modernize + +import ( + _ "embed" + "go/ast" + "go/token" + "go/types" + "strings" + + "fmt" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/astutil" +) + +var NewExprAnalyzer = &analysis.Analyzer{ + Name: "newexpr", + Doc: analysisinternal.MustExtractDoc(doc, "newexpr"), + URL: "https://pkg.go.dev/golang.org/x/tools/gopls/internal/analysis/modernize#newexpr", + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, + FactTypes: []analysis.Fact{&newLike{}}, +} + +func run(pass *analysis.Pass) (any, error) { + var ( + inspect = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + info = pass.TypesInfo + ) + + // Detect functions that are new-like, i.e. have the form: + // + // func f(x T) *T { return &x } + // + // meaning that it is equivalent to new(x), if x has type T. + for curFuncDecl := range inspect.Root().Preorder((*ast.FuncDecl)(nil)) { + decl := curFuncDecl.Node().(*ast.FuncDecl) + fn := info.Defs[decl.Name].(*types.Func) + if decl.Body != nil && len(decl.Body.List) == 1 { + if ret, ok := decl.Body.List[0].(*ast.ReturnStmt); ok && len(ret.Results) == 1 { + if unary, ok := ret.Results[0].(*ast.UnaryExpr); ok && unary.Op == token.AND { + if id, ok := unary.X.(*ast.Ident); ok { + if v, ok := info.Uses[id].(*types.Var); ok { + sig := fn.Signature() + if sig.Results().Len() == 1 && + is[*types.Pointer](sig.Results().At(0).Type()) && // => no iface conversion + sig.Params().Len() == 1 && + sig.Params().At(0) == v { + + // Export a fact for each one. + pass.ExportObjectFact(fn, &newLike{}) + + // Check file version. + file := astutil.EnclosingFile(curFuncDecl) + if !fileUses(info, file, "go1.26") { + continue // new(expr) not available in this file + } + + var edits []analysis.TextEdit + + // If 'new' is not shadowed, replace func body: &x -> new(x). + // This makes it safely and cleanly inlinable. + curRet, _ := curFuncDecl.FindNode(ret) + if lookup(info, curRet, "new") == builtinNew { + edits = []analysis.TextEdit{ + // return &x + // ---- - + // return new(x) + { + Pos: unary.OpPos, + End: unary.OpPos + token.Pos(len("&")), + NewText: []byte("new("), + }, + { + Pos: unary.X.End(), + End: unary.X.End(), + NewText: []byte(")"), + }, + } + } + + // Disabled until we resolve https://go.dev/issue/75726 + // (Go version skew between caller and callee in inliner.) + // TODO(adonovan): fix and reenable. + // + // Also, restore these lines to our section of doc.go: + // //go:fix inline + // ... + // (The directive comment causes the inline analyzer to suggest + // that calls to such functions are inlined.) + if false { + // Add a //go:fix inline annotation, if not already present. 
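+					// (Illustrative: once annotated, the inline analyzer
+					// would offer to rewrite each call of the wrapper,
+					// say ptrTo(x) for a hypothetical wrapper ptrTo,
+					// to new(x) at the call site.)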
+						// TODO(adonovan): use ast.ParseDirective when go1.26 is assured.
+						if !strings.Contains(decl.Doc.Text(), "go:fix inline") {
+							edits = append(edits, analysis.TextEdit{
+								Pos:     decl.Pos(),
+								End:     decl.Pos(),
+								NewText: []byte("//go:fix inline\n"),
+							})
+						}
+					}
+
+					if len(edits) > 0 {
+						pass.Report(analysis.Diagnostic{
+							Pos:     decl.Name.Pos(),
+							End:     decl.Name.End(),
+							Message: fmt.Sprintf("%s can be an inlinable wrapper around new(expr)", decl.Name),
+							SuggestedFixes: []analysis.SuggestedFix{
+								{
+									Message:   fmt.Sprintf("Make %s an inlinable wrapper around new(expr)", decl.Name),
+									TextEdits: edits,
+								},
+							},
+						})
+					}
+				}
+				}
+			}
+			}
+		}
+	}
+	}
+
+	// Report and transform calls, when safe.
+	// In effect, this is inlining the new-like function
+	// even before we have marked the callee with //go:fix inline.
+	for curCall := range inspect.Root().Preorder((*ast.CallExpr)(nil)) {
+		call := curCall.Node().(*ast.CallExpr)
+		var fact newLike
+		if fn, ok := typeutil.Callee(info, call).(*types.Func); ok &&
+			pass.ImportObjectFact(fn, &fact) {
+
+			// Check file version.
+			file := astutil.EnclosingFile(curCall)
+			if !fileUses(info, file, "go1.26") {
+				continue // new(expr) not available in this file
+			}
+
+			// Check new is not shadowed.
+			if lookup(info, curCall, "new") != builtinNew {
+				continue
+			}
+
+			// The return type *T must exactly match the argument type T.
+			// (We formulate it this way--not in terms of the parameter
+			// type--to support generics.)
+			var targ types.Type
+			{
+				arg := call.Args[0]
+				tvarg := info.Types[arg]
+
+				// Constants: we must work around the type checker
+				// bug that causes info.Types to wrongly report the
+				// "typed" type for an untyped constant.
+				// (See "historical reasons" in issue go.dev/issue/70638.)
+				//
+				// We don't have a reliable way to do this but we can attempt
+				// to re-typecheck the constant expression on its own, in
+				// the original lexical environment but not as a part of some
+				// larger expression that implies a conversion to some "typed" type.
+				// (For the genesis of this idea see (*state).arguments
+				// in ../../../../internal/refactor/inline/inline.go.)
+				if tvarg.Value != nil {
+					info2 := &types.Info{Types: make(map[ast.Expr]types.TypeAndValue)}
+					if err := types.CheckExpr(token.NewFileSet(), pass.Pkg, token.NoPos, arg, info2); err != nil {
+						continue // unexpected error
+					}
+					tvarg = info2.Types[arg]
+				}
+
+				targ = types.Default(tvarg.Type)
+			}
+			if !types.Identical(types.NewPointer(targ), info.TypeOf(call)) {
+				continue
+			}
+
+			pass.Report(analysis.Diagnostic{
+				Pos:     call.Pos(),
+				End:     call.End(),
+				Message: fmt.Sprintf("call of %s(x) can be simplified to new(x)", fn.Name()),
+				SuggestedFixes: []analysis.SuggestedFix{{
+					Message: fmt.Sprintf("Simplify %s(x) to new(x)", fn.Name()),
+					TextEdits: []analysis.TextEdit{{
+						Pos:     call.Fun.Pos(),
+						End:     call.Fun.End(),
+						NewText: []byte("new"),
+					}},
+				}},
+			})
+		}
+	}
+
+	return nil, nil
+}
+
+// A newLike fact records that its associated function is "new-like".
+type newLike struct{}
+
+func (*newLike) AFact() {}
+func (*newLike) String() string { return "newlike" }
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/omitzero.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/omitzero.go
new file mode 100644
index 00000000000..bd309cf9d59
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/omitzero.go
@@ -0,0 +1,119 @@
+// Copyright 2025 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modernize + +import ( + "go/ast" + "go/types" + "reflect" + "strconv" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysisinternal/generated" + "golang.org/x/tools/internal/astutil" +) + +var OmitZeroAnalyzer = &analysis.Analyzer{ + Name: "omitzero", + Doc: analysisinternal.MustExtractDoc(doc, "omitzero"), + Requires: []*analysis.Analyzer{ + generated.Analyzer, + inspect.Analyzer, + }, + Run: omitzero, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#omitzero", +} + +func checkOmitEmptyField(pass *analysis.Pass, info *types.Info, curField *ast.Field) { + typ := info.TypeOf(curField.Type) + _, ok := typ.Underlying().(*types.Struct) + if !ok { + // Not a struct + return + } + tag := curField.Tag + if tag == nil { + // No tag to check + return + } + // The omitempty tag may be used by other packages besides json, but we should only modify its use with json + tagconv, _ := strconv.Unquote(tag.Value) + match := omitemptyRegex.FindStringSubmatchIndex(tagconv) + if match == nil { + // No omitempty in json tag + return + } + omitEmptyPos, omitEmptyEnd, err := astutil.RangeInStringLiteral(curField.Tag, match[2], match[3]) + if err != nil { + return + } + removePos, removeEnd := omitEmptyPos, omitEmptyEnd + + jsonTag := reflect.StructTag(tagconv).Get("json") + if jsonTag == ",omitempty" { + // Remove the entire struct tag if json is the only package used + if match[1]-match[0] == len(tagconv) { + removePos = curField.Tag.Pos() + removeEnd = curField.Tag.End() + } else { + // Remove the json tag if omitempty is the only field + removePos, err = astutil.PosInStringLiteral(curField.Tag, match[0]) + if err != nil { + return + } + removeEnd, err = astutil.PosInStringLiteral(curField.Tag, match[1]) + if err != nil { + return + } + } + } + pass.Report(analysis.Diagnostic{ + Pos: curField.Tag.Pos(), + End: curField.Tag.End(), + Message: "Omitempty has no effect on nested struct fields", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Remove redundant omitempty tag", + TextEdits: []analysis.TextEdit{ + { + Pos: removePos, + End: removeEnd, + }, + }, + }, + { + Message: "Replace omitempty with omitzero (behavior change)", + TextEdits: []analysis.TextEdit{ + { + Pos: omitEmptyPos, + End: omitEmptyEnd, + NewText: []byte(",omitzero"), + }, + }, + }, + }}) +} + +// The omitzero pass searches for instances of "omitempty" in a json field tag on a +// struct. Since "omitempty" does not have any effect when applied to a struct field, +// it suggests either deleting "omitempty" or replacing it with "omitzero", which +// correctly excludes structs from a json encoding. 
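+//
+// For example (illustrative):
+//
+//	type Outer struct {
+//		Inner Inner `json:"inner,omitempty"` // no effect: Inner is a struct
+//	}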
+func omitzero(pass *analysis.Pass) (any, error) { + skipGenerated(pass) + + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + info := pass.TypesInfo + for curFile := range filesUsing(inspect, info, "go1.24") { + for curStruct := range curFile.Preorder((*ast.StructType)(nil)) { + for _, curField := range curStruct.Node().(*ast.StructType).Fields.List { + checkOmitEmptyField(pass, info, curField) + } + } + } + return nil, nil +} diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/plusbuild.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/plusbuild.go new file mode 100644 index 00000000000..e8af8074ff6 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/plusbuild.go @@ -0,0 +1,83 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modernize + +import ( + "go/ast" + "go/parser" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/goplsexport" +) + +var plusBuildAnalyzer = &analysis.Analyzer{ + Name: "plusbuild", + Doc: analysisinternal.MustExtractDoc(doc, "plusbuild"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#plusbuild", + Run: plusbuild, +} + +func init() { + // Export to gopls until this is a published modernizer. + goplsexport.PlusBuildModernizer = plusBuildAnalyzer +} + +func plusbuild(pass *analysis.Pass) (any, error) { + check := func(f *ast.File) { + if !fileUses(pass.TypesInfo, f, "go1.18") { + return + } + + // When gofmt sees a +build comment, it adds a + // preceding equivalent //go:build directive, so in + // formatted files we can assume that a +build line is + // part of a comment group that starts with a + // //go:build line and is followed by a blank line. + // + // While we cannot delete comments from an AST and + // expect consistent output in general, this specific + // case--deleting only some lines from a comment + // block--does format correctly. + for _, g := range f.Comments { + sawGoBuild := false + for _, c := range g.List { + if sawGoBuild && strings.HasPrefix(c.Text, "// +build ") { + pass.Report(analysis.Diagnostic{ + Pos: c.Pos(), + End: c.End(), + Message: "+build line is no longer needed", + SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Remove obsolete +build line", + TextEdits: []analysis.TextEdit{{ + Pos: c.Pos(), + End: c.End(), + }}, + }}, + }) + break + } + if strings.HasPrefix(c.Text, "//go:build ") { + sawGoBuild = true + } + } + } + } + + for _, f := range pass.Files { + check(f) + } + for _, name := range pass.IgnoredFiles { + if strings.HasSuffix(name, ".go") { + f, err := parser.ParseFile(pass.Fset, name, nil, parser.ParseComments|parser.SkipObjectResolution) + if err != nil { + continue // parse error: ignore + } + check(f) + } + } + return nil, nil +} diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/rangeint.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/rangeint.go new file mode 100644 index 00000000000..adc840f11d5 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/rangeint.go @@ -0,0 +1,310 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package modernize + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/edge" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysisinternal/generated" + typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/typesinternal/typeindex" +) + +var RangeIntAnalyzer = &analysis.Analyzer{ + Name: "rangeint", + Doc: analysisinternal.MustExtractDoc(doc, "rangeint"), + Requires: []*analysis.Analyzer{ + generated.Analyzer, + inspect.Analyzer, + typeindexanalyzer.Analyzer, + }, + Run: rangeint, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#rangeint", +} + +// rangeint offers a fix to replace a 3-clause 'for' loop: +// +// for i := 0; i < limit; i++ {} +// +// by a range loop with an integer operand: +// +// for i := range limit {} +// +// Variants: +// - The ':=' may be replaced by '='. +// - The fix may remove "i :=" if it would become unused. +// +// Restrictions: +// - The variable i must not be assigned or address-taken within the +// loop, because a "for range int" loop does not respect assignments +// to the loop index. +// - The limit must not be b.N, to avoid redundancy with bloop's fixes. +// +// Caveats: +// +// The fix causes the limit expression to be evaluated exactly once, +// instead of once per iteration. So, to avoid changing the +// cardinality of side effects, the limit expression must not involve +// function calls (e.g. seq.Len()) or channel receives. Moreover, the +// value of the limit expression must be loop invariant, which in +// practice means it must take one of the following forms: +// +// - a local variable that is assigned only once and not address-taken; +// - a constant; or +// - len(s), where s has the above properties. +func rangeint(pass *analysis.Pass) (any, error) { + skipGenerated(pass) + + info := pass.TypesInfo + + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + typeindex := pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + + for curFile := range filesUsing(inspect, info, "go1.22") { + nextLoop: + for curLoop := range curFile.Preorder((*ast.ForStmt)(nil)) { + loop := curLoop.Node().(*ast.ForStmt) + if init, ok := loop.Init.(*ast.AssignStmt); ok && + isSimpleAssign(init) && + is[*ast.Ident](init.Lhs[0]) && + isZeroIntLiteral(info, init.Rhs[0]) { + // Have: for i = 0; ... (or i := 0) + index := init.Lhs[0].(*ast.Ident) + + if compare, ok := loop.Cond.(*ast.BinaryExpr); ok && + compare.Op == token.LSS && + astutil.EqualSyntax(compare.X, init.Lhs[0]) { + // Have: for i = 0; i < limit; ... {} + + limit := compare.Y + + // If limit is "len(slice)", simplify it to "slice". + // + // (Don't replace "for i := 0; i < len(map); i++" + // with "for range m" because it's too hard to prove + // that len(m) is loop-invariant). + if call, ok := limit.(*ast.CallExpr); ok && + typeutil.Callee(info, call) == builtinLen && + is[*types.Slice](info.TypeOf(call.Args[0]).Underlying()) { + limit = call.Args[0] + } + + // Check the form of limit: must be a constant, + // or a local var that is not assigned or address-taken. 
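+				// (Illustrative: a limit such as seq.Len() is neither a
+				// constant nor a plain identifier, so limitOK remains
+				// false and the loop is left unchanged.)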
+				limitOK := false
+				if info.Types[limit].Value != nil {
+					limitOK = true // constant
+				} else if id, ok := limit.(*ast.Ident); ok {
+					if v, ok := info.Uses[id].(*types.Var); ok &&
+						!(v.Exported() && typesinternal.IsPackageLevel(v)) {
+						// limit is a local or unexported global var.
+						// (An exported global may have uses we can't see.)
+						for cur := range typeindex.Uses(v) {
+							if isScalarLvalue(info, cur) {
+								// Limit var is assigned or address-taken.
+								continue nextLoop
+							}
+						}
+						limitOK = true
+					}
+				}
+				if !limitOK {
+					continue nextLoop
+				}
+
+				if inc, ok := loop.Post.(*ast.IncDecStmt); ok &&
+					inc.Tok == token.INC &&
+					astutil.EqualSyntax(compare.X, inc.X) {
+					// Have: for i = 0; i < limit; i++ {}
+
+					// Find references to i within the loop body.
+					v := info.ObjectOf(index).(*types.Var)
+					// TODO(adonovan): use go1.25 v.Kind() == types.PackageVar
+					if typesinternal.IsPackageLevel(v) {
+						continue nextLoop
+					}
+					used := false
+					for curId := range curLoop.Child(loop.Body).Preorder((*ast.Ident)(nil)) {
+						id := curId.Node().(*ast.Ident)
+						if info.Uses[id] == v {
+							used = true
+
+							// Reject if any is an l-value (assigned or address-taken):
+							// a "for range int" loop does not respect assignments to
+							// the loop variable.
+							if isScalarLvalue(info, curId) {
+								continue nextLoop
+							}
+						}
+					}
+
+					// If i is no longer used, delete "i := ".
+					var edits []analysis.TextEdit
+					if !used && init.Tok == token.DEFINE {
+						edits = append(edits, analysis.TextEdit{
+							Pos: index.Pos(),
+							End: init.Rhs[0].Pos(),
+						})
+					}
+
+					// If i is used after the loop,
+					// don't offer a fix, as a range loop
+					// leaves i with a different final value (limit-1).
+					if init.Tok == token.ASSIGN {
+						for curId := range curLoop.Parent().Preorder((*ast.Ident)(nil)) {
+							id := curId.Node().(*ast.Ident)
+							if info.Uses[id] == v {
+								// Is i used after loop?
+								if id.Pos() > loop.End() {
+									continue nextLoop
+								}
+								// Is i used within a defer statement
+								// that is within the scope of i?
+								//   var i int
+								//   defer func() { print(i)}
+								//   for i = ... { ... }
+								for curDefer := range curId.Enclosing((*ast.DeferStmt)(nil)) {
+									if curDefer.Node().Pos() > v.Pos() {
+										continue nextLoop
+									}
+								}
+							}
+						}
+					}
+
+					// If limit is len(slice),
+					// simplify "range len(slice)" to "range slice".
+					if call, ok := limit.(*ast.CallExpr); ok &&
+						typeutil.Callee(info, call) == builtinLen &&
+						is[*types.Slice](info.TypeOf(call.Args[0]).Underlying()) {
+						limit = call.Args[0]
+					}
+
+					// If the limit is an untyped constant of non-integer type,
+					// such as "const limit = 1e3", its effective type may
+					// differ between the two forms.
+					// In a for loop, it must be comparable with the int variable i:
+					//   for i := 0; i < limit; i++
+					// but in a range loop it would become a float:
+					//   for i := range limit {}
+					// which is a type error. We need to convert it to int
+					// in this case.
+					//
+					// Unfortunately go/types discards the untyped type
+					// (but see Untyped in golang/go#70638) so we must
+					// re-type check the expression to detect this case.
+					var beforeLimit, afterLimit string
+					if v := info.Types[limit].Value; v != nil {
+						tVar := info.TypeOf(init.Rhs[0])
+						file := curFile.Node().(*ast.File)
+						// TODO(mkalil): use a types.Qualifier that respects the existing
+						// imports of this file that are visible (not shadowed) at the current position. 
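+						// (Illustrative: given "const limit = 1e3", the loop
+						// "for i := 0; i < limit; i++" becomes
+						// "for i := range int(limit)"; without the conversion,
+						// ranging over the untyped float constant would not
+						// compile.)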
+ qual := typesinternal.FileQualifier(file, pass.Pkg) + beforeLimit, afterLimit = fmt.Sprintf("%s(", types.TypeString(tVar, qual)), ")" + info2 := &types.Info{Types: make(map[ast.Expr]types.TypeAndValue)} + if types.CheckExpr(pass.Fset, pass.Pkg, limit.Pos(), limit, info2) == nil { + tLimit := types.Default(info2.TypeOf(limit)) + if types.AssignableTo(tLimit, tVar) { + beforeLimit, afterLimit = "", "" + } + } + } + + pass.Report(analysis.Diagnostic{ + Pos: init.Pos(), + End: inc.End(), + Message: "for loop can be modernized using range over int", + SuggestedFixes: []analysis.SuggestedFix{{ + Message: fmt.Sprintf("Replace for loop with range %s", + astutil.Format(pass.Fset, limit)), + TextEdits: append(edits, []analysis.TextEdit{ + // for i := 0; i < limit; i++ {} + // ----- --- + // ------- + // for i := range limit {} + + // Delete init. + { + Pos: init.Rhs[0].Pos(), + End: limit.Pos(), + NewText: []byte("range "), + }, + // Add "int(" before limit, if needed. + { + Pos: limit.Pos(), + End: limit.Pos(), + NewText: []byte(beforeLimit), + }, + // Delete inc. + { + Pos: limit.End(), + End: inc.End(), + }, + // Add ")" after limit, if needed. + { + Pos: limit.End(), + End: limit.End(), + NewText: []byte(afterLimit), + }, + }...), + }}, + }) + } + } + } + } + } + return nil, nil +} + +// isScalarLvalue reports whether the specified identifier is +// address-taken or appears on the left side of an assignment. +// +// This function is valid only for scalars (x = ...), +// not for aggregates (x.a[i] = ...) +func isScalarLvalue(info *types.Info, curId inspector.Cursor) bool { + // Unfortunately we can't simply use info.Types[e].Assignable() + // as it is always true for a variable even when that variable is + // used only as an r-value. So we must inspect enclosing syntax. + + cur := curId + + // Strip enclosing parens. + ek, _ := cur.ParentEdge() + for ek == edge.ParenExpr_X { + cur = cur.Parent() + ek, _ = cur.ParentEdge() + } + + switch ek { + case edge.AssignStmt_Lhs: + assign := cur.Parent().Node().(*ast.AssignStmt) + if assign.Tok != token.DEFINE { + return true // i = j or i += j + } + id := curId.Node().(*ast.Ident) + if v, ok := info.Defs[id]; ok && v.Pos() != id.Pos() { + return true // reassignment of i (i, j := 1, 2) + } + case edge.IncDecStmt_X: + return true // i++, i-- + case edge.UnaryExpr_X: + if cur.Parent().Node().(*ast.UnaryExpr).Op == token.AND { + return true // &i + } + } + return false +} diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/reflect.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/reflect.go new file mode 100644 index 00000000000..c9b0fa42eed --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/reflect.go @@ -0,0 +1,143 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modernize + +// This file defines modernizers that use the "reflect" package. 
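+//
+// For example, the reflecttypefor modernizer in this file
+// rewrites (illustrative sketch):
+//
+// reflect.TypeOf("") => reflect.TypeFor[string]()
+// reflect.TypeOf((*io.Reader)(nil)).Elem() => reflect.TypeFor[io.Reader]()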
+ +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/edge" + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysisinternal/generated" + typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/refactor" + "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" +) + +var ReflectTypeForAnalyzer = &analysis.Analyzer{ + Name: "reflecttypefor", + Doc: analysisinternal.MustExtractDoc(doc, "reflecttypefor"), + Requires: []*analysis.Analyzer{ + generated.Analyzer, + inspect.Analyzer, + typeindexanalyzer.Analyzer, + }, + Run: reflecttypefor, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#reflecttypefor", +} + +func reflecttypefor(pass *analysis.Pass) (any, error) { + skipGenerated(pass) + + var ( + index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + info = pass.TypesInfo + + reflectTypeOf = index.Object("reflect", "TypeOf") + ) + + for curCall := range index.Calls(reflectTypeOf) { + call := curCall.Node().(*ast.CallExpr) + // Have: reflect.TypeOf(expr) + + expr := call.Args[0] + if !typesinternal.NoEffects(info, expr) { + continue // don't eliminate operand: may have effects + } + + t := info.TypeOf(expr) + var edits []analysis.TextEdit + + // Special case for TypeOf((*T)(nil)).Elem(), + // needed when T is an interface type. + if astutil.IsChildOf(curCall, edge.SelectorExpr_X) { + curSel := unparenEnclosing(curCall).Parent() + if astutil.IsChildOf(curSel, edge.CallExpr_Fun) { + call2 := unparenEnclosing(curSel).Parent().Node().(*ast.CallExpr) + obj := typeutil.Callee(info, call2) + if typesinternal.IsMethodNamed(obj, "reflect", "Type", "Elem") { + if ptr, ok := t.(*types.Pointer); ok { + // Have: TypeOf(expr).Elem() where expr : *T + t = ptr.Elem() + // reflect.TypeOf(expr).Elem() + // ------- + // reflect.TypeOf(expr) + edits = []analysis.TextEdit{{ + Pos: call.End(), + End: call2.End(), + }} + } + } + } + } + + // TypeOf(x) where x has an interface type is a + // dynamic operation; don't transform it to TypeFor. + // (edits == nil means "not the Elem() special case".) + if types.IsInterface(t) && edits == nil { + continue + } + + file := astutil.EnclosingFile(curCall) + if versions.Before(info.FileVersions[file], "go1.22") { + continue // TypeFor requires go1.22 + } + tokFile := pass.Fset.File(file.Pos()) + + // Format the type as valid Go syntax. + // TODO(adonovan): FileQualifier needs to respect + // visibility at the current point, and either fail + // or edit the imports as needed. + qual := typesinternal.FileQualifier(file, pass.Pkg) + tstr := types.TypeString(t, qual) + + sel, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + continue // e.g. reflect was dot-imported + } + + // If the call argument contains the last use + // of a variable, as in: + // var zero T + // reflect.TypeOf(zero) + // remove the declaration of that variable. + curArg0 := curCall.ChildAt(edge.CallExpr_Args, 0) + edits = append(edits, refactor.DeleteUnusedVars(index, info, tokFile, curArg0)...) + + pass.Report(analysis.Diagnostic{ + Pos: call.Fun.Pos(), + End: call.Fun.End(), + Message: "reflect.TypeOf call can be simplified using TypeFor", + SuggestedFixes: []analysis.SuggestedFix{{ + // reflect.TypeOf (...T value...) 
+ // ------ -------------
+ // reflect.TypeFor[T]( )
+ Message: "Replace TypeOf by TypeFor",
+ TextEdits: append([]analysis.TextEdit{
+ {
+ Pos: sel.Sel.Pos(),
+ End: sel.Sel.End(),
+ NewText: []byte("TypeFor[" + tstr + "]"),
+ },
+ // delete (pure) argument
+ {
+ Pos: call.Lparen + 1,
+ End: call.Rparen,
+ },
+ }, edits...),
+ }},
+ })
+ }
+
+ return nil, nil
+}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/slices.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/slices.go
new file mode 100644
index 00000000000..032f874df1a
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/slices.go
@@ -0,0 +1,300 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modernize
+
+import (
+ "fmt"
+ "go/ast"
+ "go/types"
+ "slices"
+ "strconv"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/internal/analysisinternal"
+ "golang.org/x/tools/internal/analysisinternal/generated"
+ "golang.org/x/tools/internal/astutil"
+ "golang.org/x/tools/internal/refactor"
+ "golang.org/x/tools/internal/typesinternal"
+)
+
+// Warning: this analyzer is not safe to enable by default.
+var AppendClippedAnalyzer = &analysis.Analyzer{
+ Name: "appendclipped",
+ Doc: analysisinternal.MustExtractDoc(doc, "appendclipped"),
+ Requires: []*analysis.Analyzer{
+ generated.Analyzer,
+ inspect.Analyzer,
+ },
+ Run: appendclipped,
+ URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#appendclipped",
+}
+
+// The appendclipped pass offers to simplify a tower of append calls:
+//
+// append(append(append(base, a...), b...), c...)
+//
+// with a call to go1.21's slices.Concat(base, a, b, c), or simpler
+// replacements such as slices.Clone(a) in degenerate cases.
+//
+// We offer bytes.Clone in preference to slices.Clone where
+// appropriate, if the package already imports "bytes";
+// their behaviors are identical.
+//
+// The base expression must denote a clipped slice (see [clippedSlice]
+// for definition), otherwise the replacement might eliminate intended
+// side effects to the base slice's array.
+//
+// Examples:
+//
+// append(append(append(x[:0:0], a...), b...), c...) -> slices.Concat(a, b, c)
+// append(slices.Clip(a), b...) -> slices.Concat(a, b)
+// append([]T{}, a...) -> slices.Clone(a)
+// append([]string(nil), os.Environ()...) -> os.Environ()
+//
+// The fix does not always preserve the nilness of the base slice when the
+// addends (a, b, c) are all empty (see #73557).
+func appendclipped(pass *analysis.Pass) (any, error) {
+ skipGenerated(pass)
+
+ // Skip the analyzer in packages where its
+ // fixes would create an import cycle.
+ if within(pass, "slices", "bytes", "runtime") {
+ return nil, nil
+ }
+
+ info := pass.TypesInfo
+
+ // sliceArgs is a non-empty (reversed) list of slices to be concatenated.
+ simplifyAppendEllipsis := func(file *ast.File, call *ast.CallExpr, base ast.Expr, sliceArgs []ast.Expr) {
+ // Only appends whose base is a clipped slice can be simplified:
+ // we must conservatively assume an append to an unclipped slice
+ // such as append(y[:0], x...) is intended to have effects on y.
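+ // For example (illustrative): given
+ //
+ // y := make([]int, 0, 8)
+ // _ = append(y[:0], x...)
+ //
+ // the elements of x are written into y's backing array,
+ // an effect that slices.Clone(x) would not reproduce.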
+ clipped, empty := clippedSlice(info, base)
+ if clipped == nil {
+ return
+ }
+
+ // If any slice arg has a different type from the base
+ // (and thus the result) don't offer a fix, to avoid
+ // changing the return type, e.g.:
+ //
+ // type S []int
+ // - x := append([]int(nil), S{}...) // x : []int
+ // + x := slices.Clone(S{}) // x : S
+ //
+ // We could do better by inserting an explicit generic
+ // instantiation:
+ //
+ // x := slices.Clone[[]int](S{})
+ //
+ // but this is often unnecessary and unwanted, such as
+ // when the value is used in an assignment context that
+ // provides an explicit type:
+ //
+ // var x []int = slices.Clone(S{})
+ baseType := info.TypeOf(base)
+ for _, arg := range sliceArgs {
+ if !types.Identical(info.TypeOf(arg), baseType) {
+ return
+ }
+ }
+
+ // If the (clipped) base is empty, it may be safely ignored.
+ // Otherwise treat it (or its unclipped subexpression, if possible)
+ // as just another arg (the first) to Concat.
+ //
+ // TODO(adonovan): not so fast! If all the operands
+ // are empty, then the nilness of base matters, because
+ // append preserves nilness whereas Concat does not (#73557).
+ if !empty {
+ sliceArgs = append(sliceArgs, clipped)
+ }
+ slices.Reverse(sliceArgs)
+
+ // TODO(adonovan): simplify sliceArgs[0] further: slices.Clone(s) -> s
+
+ // Concat of a single (non-trivial) slice degenerates to Clone.
+ if len(sliceArgs) == 1 {
+ s := sliceArgs[0]
+
+ // Special case for common but redundant clone of os.Environ().
+ // append(zerocap, os.Environ()...) -> os.Environ()
+ if scall, ok := s.(*ast.CallExpr); ok {
+ obj := typeutil.Callee(info, scall)
+ if typesinternal.IsFunctionNamed(obj, "os", "Environ") {
+ pass.Report(analysis.Diagnostic{
+ Pos: call.Pos(),
+ End: call.End(),
+ Message: "Redundant clone of os.Environ()",
+ SuggestedFixes: []analysis.SuggestedFix{{
+ Message: "Eliminate redundant clone",
+ TextEdits: []analysis.TextEdit{{
+ Pos: call.Pos(),
+ End: call.End(),
+ NewText: []byte(astutil.Format(pass.Fset, s)),
+ }},
+ }},
+ })
+ return
+ }
+ }
+
+ // If the slice type is []byte, and the file imports
+ // "bytes" but not "slices", prefer the (behaviorally
+ // identical) bytes.Clone for local consistency.
+ // https://go.dev/issue/70815#issuecomment-2671572984
+ fileImports := func(path string) bool {
+ return slices.ContainsFunc(file.Imports, func(spec *ast.ImportSpec) bool {
+ value, _ := strconv.Unquote(spec.Path.Value)
+ return value == path
+ })
+ }
+ clonepkg := cond(
+ types.Identical(info.TypeOf(call), byteSliceType) &&
+ !fileImports("slices") && fileImports("bytes"),
+ "bytes",
+ "slices")
+
+ // append(zerocap, s...) -> slices.Clone(s) or bytes.Clone(s)
+ //
+ // This is unsound if s is empty and its nilness
+ // differs from zerocap (#73557).
+ prefix, importEdits := refactor.AddImport(info, file, clonepkg, clonepkg, "Clone", call.Pos())
+ message := fmt.Sprintf("Replace append with %s.Clone", clonepkg)
+ pass.Report(analysis.Diagnostic{
+ Pos: call.Pos(),
+ End: call.End(),
+ Message: message,
+ SuggestedFixes: []analysis.SuggestedFix{{
+ Message: message,
+ TextEdits: append(importEdits, []analysis.TextEdit{{
+ Pos: call.Pos(),
+ End: call.End(),
+ NewText: fmt.Appendf(nil, "%sClone(%s)", prefix, astutil.Format(pass.Fset, s)),
+ }}...),
+ }},
+ })
+ return
+ }
+
+ // append(append(append(base, a...), b...), c...) -> slices.Concat(base, a, b, c)
+ //
+ // This is unsound if all slices are empty and base is non-nil (#73557).
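+ // Illustrative instance of the caveat: when base is the non-nil
+ // []int{} and a is empty, append(base, a...) returns base itself
+ // (non-nil), whereas the suggested slices.Concat may return nil,
+ // so a later "== nil" test could change its result.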
+ prefix, importEdits := refactor.AddImport(info, file, "slices", "slices", "Concat", call.Pos())
+ pass.Report(analysis.Diagnostic{
+ Pos: call.Pos(),
+ End: call.End(),
+ Message: "Replace append with slices.Concat",
+ SuggestedFixes: []analysis.SuggestedFix{{
+ Message: "Replace append with slices.Concat",
+ TextEdits: append(importEdits, []analysis.TextEdit{{
+ Pos: call.Pos(),
+ End: call.End(),
+ NewText: fmt.Appendf(nil, "%sConcat(%s)", prefix, formatExprs(pass.Fset, sliceArgs)),
+ }}...),
+ }},
+ })
+ }
+
+ // Mark nested calls to append so that we don't emit diagnostics for them.
+ skip := make(map[*ast.CallExpr]bool)
+
+ // Visit calls of form append(x, y...).
+ inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ for curFile := range filesUsing(inspect, info, "go1.21") {
+ file := curFile.Node().(*ast.File)
+
+ for curCall := range curFile.Preorder((*ast.CallExpr)(nil)) {
+ call := curCall.Node().(*ast.CallExpr)
+ if skip[call] {
+ continue
+ }
+
+ // Recursively unwrap ellipsis calls to append, so
+ // append(append(append(base, a...), b...), c...)
+ // yields (base, [c b a]).
+ base, slices := ast.Expr(call), []ast.Expr(nil) // base case: (call, nil)
+ again:
+ if call, ok := base.(*ast.CallExpr); ok {
+ if id, ok := call.Fun.(*ast.Ident); ok &&
+ call.Ellipsis.IsValid() &&
+ len(call.Args) == 2 &&
+ info.Uses[id] == builtinAppend {
+
+ // Have: append(base, s...)
+ base, slices = call.Args[0], append(slices, call.Args[1])
+ skip[call] = true
+ goto again
+ }
+ }
+
+ if len(slices) > 0 {
+ simplifyAppendEllipsis(file, call, base, slices)
+ }
+ }
+ }
+ return nil, nil
+}
+
+// clippedSlice returns res != nil if e denotes a slice that is
+// definitely clipped, that is, a slice s for which len(s) == cap(s).
+//
+// The value of res is either the same as e or is a subexpression of e
+// that denotes the same slice but without the clipping operation.
+//
+// In addition, it reports whether the slice is definitely empty.
+//
+// Examples of clipped slices:
+//
+// x[:0:0] (empty)
+// []T(nil) (empty)
+// Slice{} (empty)
+// x[:len(x):len(x)] (nonempty) res=x
+// x[:k:k] (nonempty)
+// slices.Clip(x) (nonempty) res=x
+//
+// TODO(adonovan): Add a check that the expression x has no side effects in
+// case x[:len(x):len(x)] -> x. Without that check, the fix may change
+// the program's behavior.
+func clippedSlice(info *types.Info, e ast.Expr) (res ast.Expr, empty bool) {
+ switch e := e.(type) {
+ case *ast.SliceExpr:
+ // x[:0:0], x[:len(x):len(x)], x[:k:k]
+ if e.Slice3 && e.High != nil && e.Max != nil && astutil.EqualSyntax(e.High, e.Max) { // x[:k:k]
+ res = e
+ empty = isZeroIntLiteral(info, e.High) // x[:0:0]
+ if call, ok := e.High.(*ast.CallExpr); ok &&
+ typeutil.Callee(info, call) == builtinLen &&
+ astutil.EqualSyntax(call.Args[0], e.X) {
+ res = e.X // x[:len(x):len(x)] -> x
+ }
+ return
+ }
+ return
+
+ case *ast.CallExpr:
+ // []T(nil)?
+ if info.Types[e.Fun].IsType() &&
+ is[*ast.Ident](e.Args[0]) &&
+ info.Uses[e.Args[0].(*ast.Ident)] == builtinNil {
+ return e, true
+ }
+
+ // slices.Clip(x)?
+ obj := typeutil.Callee(info, e)
+ if typesinternal.IsFunctionNamed(obj, "slices", "Clip") {
+ return e.Args[0], false // slices.Clip(x) -> x
+ }
+
+ case *ast.CompositeLit:
+ // Slice{}?
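+ // e.g. []T{} (or S{} for a named slice type S):
+ // an empty literal is trivially clipped.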
+ if len(e.Elts) == 0 { + return e, true + } + } + return nil, false +} diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/slicescontains.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/slicescontains.go new file mode 100644 index 00000000000..b3c2e562c9a --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/slicescontains.go @@ -0,0 +1,433 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modernize + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysisinternal/generated" + typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/refactor" + "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/typesinternal/typeindex" +) + +var SlicesContainsAnalyzer = &analysis.Analyzer{ + Name: "slicescontains", + Doc: analysisinternal.MustExtractDoc(doc, "slicescontains"), + Requires: []*analysis.Analyzer{ + generated.Analyzer, + inspect.Analyzer, + typeindexanalyzer.Analyzer, + }, + Run: slicescontains, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#slicescontains", +} + +// The slicescontains pass identifies loops that can be replaced by a +// call to slices.Contains{,Func}. For example: +// +// for i, elem := range s { +// if elem == needle { +// ... +// break +// } +// } +// +// => +// +// if slices.Contains(s, needle) { ... } +// +// Variants: +// - if the if-condition is f(elem), the replacement +// uses slices.ContainsFunc(s, f). +// - if the if-body is "return true" and the fallthrough +// statement is "return false" (or vice versa), the +// loop becomes "return [!]slices.Contains(...)". +// - if the if-body is "found = true" and the previous +// statement is "found = false" (or vice versa), the +// loop becomes "found = [!]slices.Contains(...)". +// +// It may change cardinality of effects of the "needle" expression. +// (Mostly this appears to be a desirable optimization, avoiding +// redundantly repeated evaluation.) +// +// TODO(adonovan): Add a check that needle/predicate expression from +// if-statement has no effects. Now the program behavior may change. +func slicescontains(pass *analysis.Pass) (any, error) { + skipGenerated(pass) + + // Skip the analyzer in packages where its + // fixes would create an import cycle. + if within(pass, "slices", "runtime") { + return nil, nil + } + + var ( + inspect = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + info = pass.TypesInfo + ) + + // check is called for each RangeStmt of this form: + // for i, elem := range s { if cond { ... } } + check := func(file *ast.File, curRange inspector.Cursor) { + rng := curRange.Node().(*ast.RangeStmt) + ifStmt := rng.Body.List[0].(*ast.IfStmt) + + // isSliceElem reports whether e denotes the + // current slice element (elem or s[i]). 
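+ // For example, in "for i, elem := range s", both "elem"
+ // and "s[i]" denote the current element.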
+ isSliceElem := func(e ast.Expr) bool { + if rng.Value != nil && astutil.EqualSyntax(e, rng.Value) { + return true // "elem" + } + if x, ok := e.(*ast.IndexExpr); ok && + astutil.EqualSyntax(x.X, rng.X) && + astutil.EqualSyntax(x.Index, rng.Key) { + return true // "s[i]" + } + return false + } + + // Examine the condition for one of these forms: + // + // - if elem or s[i] == needle { ... } => Contains + // - if predicate(s[i] or elem) { ... } => ContainsFunc + var ( + funcName string // "Contains" or "ContainsFunc" + arg2 ast.Expr // second argument to func (needle or predicate) + ) + switch cond := ifStmt.Cond.(type) { + case *ast.BinaryExpr: + if cond.Op == token.EQL { + var elem ast.Expr + if isSliceElem(cond.X) { + funcName = "Contains" + elem = cond.X + arg2 = cond.Y // "if elem == needle" + } else if isSliceElem(cond.Y) { + funcName = "Contains" + elem = cond.Y + arg2 = cond.X // "if needle == elem" + } + + // Reject if elem and needle have different types. + if elem != nil { + tElem := info.TypeOf(elem) + tNeedle := info.TypeOf(arg2) + if !types.Identical(tElem, tNeedle) { + // Avoid ill-typed slices.Contains([]error, any). + if !types.AssignableTo(tNeedle, tElem) { + return + } + // TODO(adonovan): relax this check to allow + // slices.Contains([]error, error(any)), + // inserting an explicit widening conversion + // around the needle. + return + } + } + } + + case *ast.CallExpr: + if len(cond.Args) == 1 && + isSliceElem(cond.Args[0]) && + typeutil.Callee(info, cond) != nil { // not a conversion + + // Attempt to get signature + sig, isSignature := info.TypeOf(cond.Fun).(*types.Signature) + if isSignature { + // skip variadic functions + if sig.Variadic() { + return + } + + // Slice element type must match function parameter type. + var ( + tElem = typeparams.CoreType(info.TypeOf(rng.X)).(*types.Slice).Elem() + tParam = sig.Params().At(0).Type() + ) + if !types.Identical(tElem, tParam) { + return + } + } + + funcName = "ContainsFunc" + arg2 = cond.Fun // "if predicate(elem)" + } + } + if funcName == "" { + return // not a candidate for Contains{,Func} + } + + // body is the "true" body. + body := ifStmt.Body + if len(body.List) == 0 { + // (We could perhaps delete the loop entirely.) + return + } + + // Reject if the body, needle or predicate references either range variable. + usesRangeVar := func(n ast.Node) bool { + cur, ok := curRange.FindNode(n) + if !ok { + panic(fmt.Sprintf("FindNode(%T) failed", n)) + } + return uses(index, cur, info.Defs[rng.Key.(*ast.Ident)]) || + rng.Value != nil && uses(index, cur, info.Defs[rng.Value.(*ast.Ident)]) + } + if usesRangeVar(body) { + // Body uses range var "i" or "elem". + // + // (The check for "i" could be relaxed when we + // generalize this to support slices.Index; + // and the check for "elem" could be relaxed + // if "elem" can safely be replaced in the + // body by "needle".) + return + } + if usesRangeVar(arg2) { + return + } + + // Prepare slices.Contains{,Func} call. + prefix, importEdits := refactor.AddImport(info, file, "slices", "slices", funcName, rng.Pos()) + contains := fmt.Sprintf("%s%s(%s, %s)", + prefix, + funcName, + astutil.Format(pass.Fset, rng.X), + astutil.Format(pass.Fset, arg2)) + + report := func(edits []analysis.TextEdit) { + pass.Report(analysis.Diagnostic{ + Pos: rng.Pos(), + End: rng.End(), + Message: fmt.Sprintf("Loop can be simplified using slices.%s", funcName), + SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Replace loop by call to slices." 
+ funcName,
+ TextEdits: append(edits, importEdits...),
+ }},
+ })
+ }
+
+ // Last statement of body must return/break out of the loop.
+ //
+ // TODO(adonovan): opt: consider avoiding FindNode with new API of form:
+ // curRange.Get(edge.RangeStmt_Body, -1).
+ // Get(edge.BodyStmt_List, 0).
+ // Get(edge.IfStmt_Body)
+ curBody, _ := curRange.FindNode(body)
+ curLastStmt, _ := curBody.LastChild()
+
+ // Reject if any statement in the body except the
+ // last has a free continuation (continue or break)
+ // that might be affected by melting down the loop.
+ //
+ // TODO(adonovan): relax check by analyzing branch target.
+ for curBodyStmt := range curBody.Children() {
+ if curBodyStmt != curLastStmt {
+ for range curBodyStmt.Preorder((*ast.BranchStmt)(nil), (*ast.ReturnStmt)(nil)) {
+ return
+ }
+ }
+ }
+
+ switch lastStmt := curLastStmt.Node().(type) {
+ case *ast.ReturnStmt:
+ // Have: for ... range seq { if ... { stmts; return x } }
+
+ // Special case:
+ // body={ return true } next="return false" (or negation)
+ // => return [!]slices.Contains(...)
+ if curNext, ok := curRange.NextSibling(); ok {
+ nextStmt := curNext.Node().(ast.Stmt)
+ tval := isReturnTrueOrFalse(info, lastStmt)
+ fval := isReturnTrueOrFalse(info, nextStmt)
+ if len(body.List) == 1 && tval*fval < 0 {
+ // for ... { if ... { return true/false } }
+ // => return [!]slices.Contains(...)
+ report([]analysis.TextEdit{
+ // Delete the range statement and following space.
+ {
+ Pos: rng.Pos(),
+ End: nextStmt.Pos(),
+ },
+ // Change return to [!]slices.Contains(...).
+ {
+ Pos: nextStmt.Pos(),
+ End: nextStmt.End(),
+ NewText: fmt.Appendf(nil, "return %s%s",
+ cond(tval > 0, "", "!"),
+ contains),
+ },
+ })
+ return
+ }
+ }
+
+ // General case:
+ // => if slices.Contains(...) { stmts; return x }
+ report([]analysis.TextEdit{
+ // Replace "for ... { if ... " with "if slices.Contains(...)".
+ {
+ Pos: rng.Pos(),
+ End: ifStmt.Body.Pos(),
+ NewText: fmt.Appendf(nil, "if %s ", contains),
+ },
+ // Delete '}' of range statement and preceding space.
+ {
+ Pos: ifStmt.Body.End(),
+ End: rng.End(),
+ },
+ })
+ return
+
+ case *ast.BranchStmt:
+ if lastStmt.Tok == token.BREAK && lastStmt.Label == nil { // unlabeled break
+ // Have: for ... { if ... { stmts; break } }
+
+ var prevStmt ast.Stmt // previous statement to range (if any)
+ if curPrev, ok := curRange.PrevSibling(); ok {
+ // If the RangeStmt's previous sibling is a Stmt,
+ // the RangeStmt must be among the Body list of
+ // a BlockStmt, CaseClause, or CommClause.
+ // In all cases, the prevStmt is the immediate
+ // predecessor of the RangeStmt during execution.
+ //
+ // (This is not true for Stmts in general;
+ // see [Cursor.Children] and #71074.)
+ prevStmt, _ = curPrev.Node().(ast.Stmt)
+ }
+
+ // Special case:
+ // prev="lhs = false" body={ lhs = true; break }
+ // => lhs = slices.Contains(...) (or negation)
+ if assign, ok := body.List[0].(*ast.AssignStmt); ok &&
+ len(body.List) == 2 &&
+ assign.Tok == token.ASSIGN &&
+ len(assign.Lhs) == 1 &&
+ len(assign.Rhs) == 1 {
+
+ // Have: body={ lhs = rhs; break }
+
+ if prevAssign, ok := prevStmt.(*ast.AssignStmt); ok &&
+ len(prevAssign.Lhs) == 1 &&
+ len(prevAssign.Rhs) == 1 &&
+ astutil.EqualSyntax(prevAssign.Lhs[0], assign.Lhs[0]) &&
+ is[*ast.Ident](assign.Rhs[0]) &&
+ info.Uses[assign.Rhs[0].(*ast.Ident)] == builtinTrue {
+
+ // Have:
+ // lhs = false
+ // for ... { if ... { lhs = true; break } }
+ // =>
+ // lhs = slices.Contains(...)
+ //
+ // TODO(adonovan):
+ // - support "var lhs bool = false" and variants.
+ // - support negation. + // Both these variants seem quite significant. + // - allow the break to be omitted. + report([]analysis.TextEdit{ + // Replace "rhs" of previous assignment by slices.Contains(...) + { + Pos: prevAssign.Rhs[0].Pos(), + End: prevAssign.Rhs[0].End(), + NewText: []byte(contains), + }, + // Delete the loop and preceding space. + { + Pos: prevAssign.Rhs[0].End(), + End: rng.End(), + }, + }) + return + } + } + + // General case: + // for ... { if ... { stmts; break } } + // => if slices.Contains(...) { stmts } + report([]analysis.TextEdit{ + // Replace "for ... { if ... " with "if slices.Contains(...)". + { + Pos: rng.Pos(), + End: ifStmt.Body.Pos(), + NewText: fmt.Appendf(nil, "if %s ", contains), + }, + // Delete break statement and preceding space. + { + Pos: func() token.Pos { + if len(body.List) > 1 { + beforeBreak, _ := curLastStmt.PrevSibling() + return beforeBreak.Node().End() + } + return lastStmt.Pos() + }(), + End: lastStmt.End(), + }, + // Delete '}' of range statement and preceding space. + { + Pos: ifStmt.Body.End(), + End: rng.End(), + }, + }) + return + } + } + } + + for curFile := range filesUsing(inspect, info, "go1.21") { + file := curFile.Node().(*ast.File) + + for curRange := range curFile.Preorder((*ast.RangeStmt)(nil)) { + rng := curRange.Node().(*ast.RangeStmt) + + if is[*ast.Ident](rng.Key) && + rng.Tok == token.DEFINE && + len(rng.Body.List) == 1 && + is[*types.Slice](typeparams.CoreType(info.TypeOf(rng.X))) { + + // Have: + // - for _, elem := range s { S } + // - for i := range s { S } + + if ifStmt, ok := rng.Body.List[0].(*ast.IfStmt); ok && + ifStmt.Init == nil && ifStmt.Else == nil { + + // Have: for i, elem := range s { if cond { ... } } + check(file, curRange) + } + } + } + } + return nil, nil +} + +// -- helpers -- + +// isReturnTrueOrFalse returns nonzero if stmt returns true (+1) or false (-1). +func isReturnTrueOrFalse(info *types.Info, stmt ast.Stmt) int { + if ret, ok := stmt.(*ast.ReturnStmt); ok && len(ret.Results) == 1 { + if id, ok := ret.Results[0].(*ast.Ident); ok { + switch info.Uses[id] { + case builtinTrue: + return +1 + case builtinFalse: + return -1 + } + } + } + return 0 +} diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/slicesdelete.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/slicesdelete.go new file mode 100644 index 00000000000..b3e063db0fb --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/slicesdelete.go @@ -0,0 +1,184 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modernize + +import ( + "go/ast" + "go/constant" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysisinternal/generated" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/refactor" + "golang.org/x/tools/internal/typesinternal" +) + +// Warning: this analyzer is not safe to enable by default (not nil-preserving). 
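+//
+// For example (illustrative sketch of the rewrite):
+//
+// s = append(s[:i], s[i+1:]...) => s = slices.Delete(s, i, i+1)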
+var SlicesDeleteAnalyzer = &analysis.Analyzer{ + Name: "slicesdelete", + Doc: analysisinternal.MustExtractDoc(doc, "slicesdelete"), + Requires: []*analysis.Analyzer{ + generated.Analyzer, + inspect.Analyzer, + }, + Run: slicesdelete, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#slicesdelete", +} + +// The slicesdelete pass attempts to replace instances of append(s[:i], s[i+k:]...) +// with slices.Delete(s, i, i+k) where k is some positive constant. +// Other variations that will also have suggested replacements include: +// append(s[:i-1], s[i:]...) and append(s[:i+k1], s[i+k2:]) where k2 > k1. +func slicesdelete(pass *analysis.Pass) (any, error) { + skipGenerated(pass) + + // Skip the analyzer in packages where its + // fixes would create an import cycle. + if within(pass, "slices", "runtime") { + return nil, nil + } + + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + info := pass.TypesInfo + report := func(file *ast.File, call *ast.CallExpr, slice1, slice2 *ast.SliceExpr) { + insert := func(pos token.Pos, text string) analysis.TextEdit { + return analysis.TextEdit{Pos: pos, End: pos, NewText: []byte(text)} + } + isIntExpr := func(e ast.Expr) bool { + return types.Identical(types.Default(info.TypeOf(e)), builtinInt.Type()) + } + isIntShadowed := func() bool { + scope := pass.TypesInfo.Scopes[file].Innermost(call.Lparen) + if _, obj := scope.LookupParent("int", call.Lparen); obj != builtinInt { + return true // int type is shadowed + } + return false + } + + prefix, edits := refactor.AddImport(info, file, "slices", "slices", "Delete", call.Pos()) + // append's indices may be any integer type; slices.Delete requires int. + // Insert int conversions as needed (and if possible). + if isIntShadowed() && (!isIntExpr(slice1.High) || !isIntExpr(slice2.Low)) { + return + } + if !isIntExpr(slice1.High) { + edits = append(edits, + insert(slice1.High.Pos(), "int("), + insert(slice1.High.End(), ")"), + ) + } + if !isIntExpr(slice2.Low) { + edits = append(edits, + insert(slice2.Low.Pos(), "int("), + insert(slice2.Low.End(), ")"), + ) + } + + pass.Report(analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: "Replace append with slices.Delete", + SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Replace append with slices.Delete", + TextEdits: append(edits, []analysis.TextEdit{ + // Change name of called function. + { + Pos: call.Fun.Pos(), + End: call.Fun.End(), + NewText: []byte(prefix + "Delete"), + }, + // Delete ellipsis. + { + Pos: call.Ellipsis, + End: call.Ellipsis + token.Pos(len("...")), // delete ellipsis + }, + // Remove second slice variable name. + { + Pos: slice2.X.Pos(), + End: slice2.X.End(), + }, + // Insert after first slice variable name. + { + Pos: slice1.X.End(), + NewText: []byte(", "), + }, + // Remove brackets and colons. + { + Pos: slice1.Lbrack, + End: slice1.High.Pos(), + }, + { + Pos: slice1.Rbrack, + End: slice1.Rbrack + 1, + }, + { + Pos: slice2.Lbrack, + End: slice2.Lbrack + 1, + }, + { + Pos: slice2.Low.End(), + End: slice2.Rbrack + 1, + }, + }...), + }}, + }) + } + for curFile := range filesUsing(inspect, info, "go1.21") { + file := curFile.Node().(*ast.File) + for curCall := range curFile.Preorder((*ast.CallExpr)(nil)) { + call := curCall.Node().(*ast.CallExpr) + if id, ok := call.Fun.(*ast.Ident); ok && len(call.Args) == 2 { + // Verify we have append with two slices and ... operator, + // the first slice has no low index and second slice has no + // high index, and not a three-index slice. 
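+ // That is (illustrative): accept append(s[:i], s[j:]...) when
+ // i < j is provable; reject forms such as append(s[1:i], s[j:]...)
+ // or append(s[:i:i], s[j:]...).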
+ if call.Ellipsis.IsValid() && info.Uses[id] == builtinAppend { + slice1, ok1 := call.Args[0].(*ast.SliceExpr) + slice2, ok2 := call.Args[1].(*ast.SliceExpr) + if ok1 && slice1.Low == nil && !slice1.Slice3 && + ok2 && slice2.High == nil && !slice2.Slice3 && + astutil.EqualSyntax(slice1.X, slice2.X) && + typesinternal.NoEffects(info, slice1.X) && + increasingSliceIndices(info, slice1.High, slice2.Low) { + // Have append(s[:a], s[b:]...) where we can verify a < b. + report(file, call, slice1, slice2) + } + } + } + } + } + return nil, nil +} + +// Given two slice indices a and b, returns true if we can verify that a < b. +// It recognizes certain forms such as i+k1 < i+k2 where k1 < k2. +func increasingSliceIndices(info *types.Info, a, b ast.Expr) bool { + // Given an expression of the form i±k, returns (i, k) + // where k is a signed constant. Otherwise it returns (e, 0). + split := func(e ast.Expr) (ast.Expr, constant.Value) { + if binary, ok := e.(*ast.BinaryExpr); ok && (binary.Op == token.SUB || binary.Op == token.ADD) { + // Negate constants if operation is subtract instead of add + if k := info.Types[binary.Y].Value; k != nil { + return binary.X, constant.UnaryOp(binary.Op, k, 0) // i ± k + } + } + return e, constant.MakeInt64(0) + } + + // Handle case where either a or b is a constant + ak := info.Types[a].Value + bk := info.Types[b].Value + if ak != nil || bk != nil { + return ak != nil && bk != nil && constant.Compare(ak, token.LSS, bk) + } + + ai, ak := split(a) + bi, bk := split(b) + return astutil.EqualSyntax(ai, bi) && constant.Compare(ak, token.LSS, bk) +} diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/sortslice.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/sortslice.go new file mode 100644 index 00000000000..66af16d1f66 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/sortslice.go @@ -0,0 +1,124 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modernize + +import ( + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysisinternal/generated" + typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/refactor" + "golang.org/x/tools/internal/typesinternal/typeindex" +) + +// (Not to be confused with go/analysis/passes/sortslice.) +var SlicesSortAnalyzer = &analysis.Analyzer{ + Name: "slicessort", + Doc: analysisinternal.MustExtractDoc(doc, "slicessort"), + Requires: []*analysis.Analyzer{ + generated.Analyzer, + inspect.Analyzer, + typeindexanalyzer.Analyzer, + }, + Run: slicessort, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#slicessort", +} + +// The slicessort pass replaces sort.Slice(slice, less) with +// slices.Sort(slice) when slice is a []T and less is a FuncLit +// equivalent to cmp.Ordered[T]. +// +// sort.Slice(s, func(i, j int) bool { return s[i] < s[j] }) +// => slices.Sort(s) +// +// There is no slices.SortStable. +// +// TODO(adonovan): support +// +// - sort.Slice(s, func(i, j int) bool { return s[i] ... s[j] }) +// -> slices.SortFunc(s, func(x, y T) int { return x ... y }) +// iff all uses of i, j can be replaced by s[i], s[j] and "<" can be replaced with cmp.Compare. 
+// +// - As above for sort.SliceStable -> slices.SortStableFunc. +// +// - sort.Sort(x) where x has a named slice type whose Less method is the natural order. +// -> sort.Slice(x) +func slicessort(pass *analysis.Pass) (any, error) { + skipGenerated(pass) + + // Skip the analyzer in packages where its + // fixes would create an import cycle. + if within(pass, "slices", "sort", "runtime") { + return nil, nil + } + + var ( + info = pass.TypesInfo + index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + sortSlice = index.Object("sort", "Slice") + ) + for curCall := range index.Calls(sortSlice) { + call := curCall.Node().(*ast.CallExpr) + if lit, ok := call.Args[1].(*ast.FuncLit); ok && len(lit.Body.List) == 1 { + sig := info.Types[lit.Type].Type.(*types.Signature) + + // Have: sort.Slice(s, func(i, j int) bool { return ... }) + s := call.Args[0] + i := sig.Params().At(0) + j := sig.Params().At(1) + + if ret, ok := lit.Body.List[0].(*ast.ReturnStmt); ok { + if compare, ok := ret.Results[0].(*ast.BinaryExpr); ok && compare.Op == token.LSS { + // isIndex reports whether e is s[v]. + isIndex := func(e ast.Expr, v *types.Var) bool { + index, ok := e.(*ast.IndexExpr) + return ok && + astutil.EqualSyntax(index.X, s) && + is[*ast.Ident](index.Index) && + info.Uses[index.Index.(*ast.Ident)] == v + } + file := astutil.EnclosingFile(curCall) + if isIndex(compare.X, i) && isIndex(compare.Y, j) && + fileUses(info, file, "go1.21") { + // Have: sort.Slice(s, func(i, j int) bool { return s[i] < s[j] }) + + prefix, importEdits := refactor.AddImport( + info, file, "slices", "slices", "Sort", call.Pos()) + + pass.Report(analysis.Diagnostic{ + // Highlight "sort.Slice". + Pos: call.Fun.Pos(), + End: call.Fun.End(), + Message: "sort.Slice can be modernized using slices.Sort", + SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Replace sort.Slice call by slices.Sort", + TextEdits: append(importEdits, []analysis.TextEdit{ + { + // Replace sort.Slice with slices.Sort. + Pos: call.Fun.Pos(), + End: call.Fun.End(), + NewText: []byte(prefix + "Sort"), + }, + { + // Eliminate FuncLit. + Pos: call.Args[0].End(), + End: call.Rparen, + }, + }...), + }}, + }) + } + } + } + } + } + return nil, nil +} diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stditerators.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stditerators.go new file mode 100644 index 00000000000..20817520e1b --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stditerators.go @@ -0,0 +1,354 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package modernize + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/edge" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysisinternal/generated" + typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/goplsexport" + "golang.org/x/tools/internal/refactor" + "golang.org/x/tools/internal/stdlib" + "golang.org/x/tools/internal/typesinternal/typeindex" +) + +var stditeratorsAnalyzer = &analysis.Analyzer{ + Name: "stditerators", + Doc: analysisinternal.MustExtractDoc(doc, "stditerators"), + Requires: []*analysis.Analyzer{ + generated.Analyzer, + typeindexanalyzer.Analyzer, + }, + Run: stditerators, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#stditerators", +} + +func init() { + // Export to gopls until this is a published modernizer. + goplsexport.StdIteratorsModernizer = stditeratorsAnalyzer +} + +// stditeratorsTable records std types that have legacy T.{Len,At} +// iteration methods as well as a newer T.All method that returns an +// iter.Seq. +var stditeratorsTable = [...]struct { + pkgpath, typename, lenmethod, atmethod, itermethod, elemname string +}{ + // Example: in go/types, (*Tuple).Variables returns an + // iterator that replaces a loop over (*Tuple).{Len,At}. + // The loop variable is named "v". + {"go/types", "Interface", "NumEmbeddeds", "EmbeddedType", "EmbeddedTypes", "etyp"}, + {"go/types", "Interface", "NumExplicitMethods", "ExplicitMethod", "ExplicitMethods", "method"}, + {"go/types", "Interface", "NumMethods", "Method", "Methods", "method"}, + {"go/types", "MethodSet", "Len", "At", "Methods", "method"}, + {"go/types", "Named", "NumMethods", "Method", "Methods", "method"}, + {"go/types", "Scope", "NumChildren", "Child", "Children", "child"}, + {"go/types", "Struct", "NumFields", "Field", "Fields", "field"}, + {"go/types", "Tuple", "Len", "At", "Variables", "v"}, + {"go/types", "TypeList", "Len", "At", "Types", "t"}, + {"go/types", "TypeParamList", "Len", "At", "TypeParams", "tparam"}, + {"go/types", "Union", "Len", "Term", "Terms", "term"}, + // TODO(adonovan): support Seq2. Bonus: transform uses of both key and value. + // {"reflect", "Value", "NumFields", "Field", "Fields", "field"}, +} + +// stditerators suggests fixes to replace loops using Len/At-style +// iterator APIs by a range loop over an iterator. The set of +// participating types and methods is defined by [iteratorsTable]. +// +// Pattern: +// +// for i := 0; i < x.Len(); i++ { +// use(x.At(i)) +// } +// +// => +// +// for elem := range x.All() { +// use(elem) +// } +// +// Variant: +// +// for i := range x.Len() { ... } +// +// Note: Iterators have a dynamic cost. How do we know that +// the user hasn't intentionally chosen not to use an +// iterator for that reason? We don't want to go fix to +// undo optimizations. Do we need a suppression mechanism? +func stditerators(pass *analysis.Pass) (any, error) { + skipGenerated(pass) + + var ( + index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + info = pass.TypesInfo + ) + + for _, row := range stditeratorsTable { + // Don't offer fixes within the package + // that defines the iterator in question. 
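+ // (For example, fixes inside go/types itself are suppressed.)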
+ if within(pass, row.pkgpath) { + continue + } + + var ( + lenMethod = index.Selection(row.pkgpath, row.typename, row.lenmethod) + atMethod = index.Selection(row.pkgpath, row.typename, row.atmethod) + ) + + // chooseName returns an appropriate fresh name + // for the index variable of the iterator loop + // whose body is specified. + // + // If the loop body starts with + // + // for ... { e := x.At(i); use(e) } + // + // then chooseName prefers the name e and additionally + // returns the var's symbol. We'll transform this to: + // + // for e := range x.Len() { e := e; use(e) } + // + // which leaves a redundant assignment that a + // subsequent 'forvar' pass will eliminate. + chooseName := func(curBody inspector.Cursor, x ast.Expr, i *types.Var) (string, *types.Var) { + // Is body { elem := x.At(i); ... } ? + body := curBody.Node().(*ast.BlockStmt) + if len(body.List) > 0 { + if assign, ok := body.List[0].(*ast.AssignStmt); ok && + assign.Tok == token.DEFINE && + len(assign.Lhs) == 1 && + len(assign.Rhs) == 1 && + is[*ast.Ident](assign.Lhs[0]) { + // call to x.At(i)? + if call, ok := assign.Rhs[0].(*ast.CallExpr); ok && + typeutil.Callee(info, call) == atMethod && + astutil.EqualSyntax(ast.Unparen(call.Fun).(*ast.SelectorExpr).X, x) && + is[*ast.Ident](call.Args[0]) && + info.Uses[call.Args[0].(*ast.Ident)] == i { + // Have: { elem := x.At(i); ... } + id := assign.Lhs[0].(*ast.Ident) + return id.Name, info.Defs[id].(*types.Var) + } + } + } + + loop := curBody.Parent().Node() + return refactor.FreshName(info.Scopes[loop], loop.Pos(), row.elemname), nil + } + + // Process each call of x.Len(). + nextCall: + for curLenCall := range index.Calls(lenMethod) { + lenSel, ok := ast.Unparen(curLenCall.Node().(*ast.CallExpr).Fun).(*ast.SelectorExpr) + if !ok { + continue + } + // lenSel is "x.Len" + + var ( + rng analysis.Range // where to report diagnostic + curBody inspector.Cursor // loop body + indexVar *types.Var // old loop index var + elemVar *types.Var // existing "elem := x.At(i)" var, if present + elem string // name for new loop var + edits []analysis.TextEdit + ) + + // Analyze enclosing loop. + switch ek, _ := curLenCall.ParentEdge(); ek { + case edge.BinaryExpr_Y: + // pattern 1: for i := 0; i < x.Len(); i++ { ... } + var ( + curCmp = curLenCall.Parent() + cmp = curCmp.Node().(*ast.BinaryExpr) + ) + if cmp.Op != token.LSS || + !astutil.IsChildOf(curCmp, edge.ForStmt_Cond) { + continue + } + if id, ok := cmp.X.(*ast.Ident); ok { + // Have: for _; i < x.Len(); _ { ... } + var ( + v = info.Uses[id].(*types.Var) + curFor = curCmp.Parent() + loop = curFor.Node().(*ast.ForStmt) + ) + if v != isIncrementLoop(info, loop) { + continue + } + // Have: for i := 0; i < x.Len(); i++ { ... }. + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + rng = analysisinternal.Range(loop.For, loop.Post.End()) + indexVar = v + curBody = curFor.ChildAt(edge.ForStmt_Body, -1) + elem, elemVar = chooseName(curBody, lenSel.X, indexVar) + + // for i := 0; i < x.Len(); i++ { + // ---- ------- --- ----- + // for elem := range x.All() { + edits = []analysis.TextEdit{ + { + Pos: v.Pos(), + End: v.Pos() + token.Pos(len(v.Name())), + NewText: []byte(elem), + }, + { + Pos: loop.Init.(*ast.AssignStmt).Rhs[0].Pos(), + End: cmp.Y.Pos(), + NewText: []byte("range "), + }, + { + Pos: lenSel.Sel.Pos(), + End: lenSel.Sel.End(), + NewText: []byte(row.itermethod), + }, + { + Pos: curLenCall.Node().End(), + End: loop.Post.End(), + }, + } + } + + case edge.RangeStmt_X: + // pattern 2: for i := range x.Len() { ... 
}
+ var (
+ curRange = curLenCall.Parent()
+ loop = curRange.Node().(*ast.RangeStmt)
+ )
+ if id, ok := loop.Key.(*ast.Ident); ok &&
+ loop.Value == nil &&
+ loop.Tok == token.DEFINE {
+ // Have: for i := range x.Len() { ... }
+ // ~~~~~~~~~~~~~
+
+ rng = analysisinternal.Range(loop.Range, loop.X.End())
+ indexVar = info.Defs[id].(*types.Var)
+ curBody = curRange.ChildAt(edge.RangeStmt_Body, -1)
+ elem, elemVar = chooseName(curBody, lenSel.X, indexVar)
+
+ // for i := range x.Len() {
+ // ---- ---
+ // for elem := range x.All() {
+ edits = []analysis.TextEdit{
+ {
+ Pos: loop.Key.Pos(),
+ End: loop.Key.End(),
+ NewText: []byte(elem),
+ },
+ {
+ Pos: lenSel.Sel.Pos(),
+ End: lenSel.Sel.End(),
+ NewText: []byte(row.itermethod),
+ },
+ }
+ }
+ }
+
+ if indexVar == nil {
+ continue // no loop of the required form
+ }
+
+ // TODO(adonovan): what about possible
+ // modifications of x within the loop?
+ // Aliasing seems to make a conservative
+ // treatment impossible.
+
+ // Check that all uses of var i within loop body are x.At(i).
+ for curUse := range index.Uses(indexVar) {
+ if !curBody.Contains(curUse) {
+ continue
+ }
+ if ek, argidx := curUse.ParentEdge(); ek != edge.CallExpr_Args || argidx != 0 {
+ continue nextCall // use is not arg of call
+ }
+ curAtCall := curUse.Parent()
+ atCall := curAtCall.Node().(*ast.CallExpr)
+ if typeutil.Callee(info, atCall) != atMethod {
+ continue nextCall // use is not arg of call to T.At
+ }
+ atSel := ast.Unparen(atCall.Fun).(*ast.SelectorExpr)
+
+ // Check receivers of Len, At calls match (syntactically).
+ if !astutil.EqualSyntax(lenSel.X, atSel.X) {
+ continue nextCall
+ }
+
+ // At each point of use, check that
+ // the fresh variable is not shadowed
+ // by an intervening local declaration
+ // (or by the idiomatic elemVar optionally
+ // found by chooseName).
+ if obj := lookup(info, curAtCall, elem); obj != nil && obj != elemVar && obj.Pos() > indexVar.Pos() {
+ // (Ideally, instead of giving up, we would
+ // embellish the name and try again.)
+ continue nextCall
+ }
+
+ // use(x.At(i))
+ // -------
+ // use(elem )
+ edits = append(edits, analysis.TextEdit{
+ Pos: atCall.Pos(),
+ End: atCall.End(),
+ NewText: []byte(elem),
+ })
+ }
+
+ // Check file Go version is new enough for the iterator method.
+ // (In the long run, version filters are not highly selective,
+ // so there's no need to do them first, especially as this check
+ // may be somewhat expensive.)
+ if v, ok := methodGoVersion(row.pkgpath, row.typename, row.itermethod); !ok {
+ panic("no version found")
+ } else if file := astutil.EnclosingFile(curLenCall); !fileUses(info, file, v.String()) {
+ continue nextCall
+ }
+
+ pass.Report(analysis.Diagnostic{
+ Pos: rng.Pos(),
+ End: rng.End(),
+ Message: fmt.Sprintf("%s/%s loop can be simplified using %s.%s iteration",
+ row.lenmethod, row.atmethod, row.typename, row.itermethod),
+ SuggestedFixes: []analysis.SuggestedFix{{
+ Message: fmt.Sprintf(
+ "Replace %s/%s loop with %s.%s iteration",
+ row.lenmethod, row.atmethod, row.typename, row.itermethod),
+ TextEdits: edits,
+ }},
+ })
+ }
+ }
+ return nil, nil
+}
+
+// -- helpers --
+
+// methodGoVersion reports the version at which the method
+// (pkgpath.recvtype).method appeared in the standard library.
+func methodGoVersion(pkgpath, recvtype, method string) (stdlib.Version, bool) {
+ // TODO(adonovan): opt: this might be inefficient for large packages
+ // like go/types. If so, memoize using a map (and kill two birds with
+ // one stone by also memoizing the 'within' check above).
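+ // For example (illustrative): methodGoVersion("go/types", "Tuple", "Variables")
+ // should report go1.24, the release that added the go/types iterator methods.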
+ for _, sym := range stdlib.PackageSymbols[pkgpath] {
+ if sym.Kind == stdlib.Method {
+ _, recv, name := sym.SplitMethod()
+ if recv == recvtype && name == method {
+ return sym.Version, true
+ }
+ }
+ }
+ return 0, false
+}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringsbuilder.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringsbuilder.go
new file mode 100644
index 00000000000..56d0ba73cc2
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringsbuilder.go
@@ -0,0 +1,328 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modernize
+
+import (
+ "fmt"
+ "go/ast"
+ "go/constant"
+ "go/token"
+ "go/types"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/edge"
+ "golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/analysisinternal"
+ "golang.org/x/tools/internal/analysisinternal/generated"
+ typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex"
+ "golang.org/x/tools/internal/astutil"
+ "golang.org/x/tools/internal/refactor"
+ "golang.org/x/tools/internal/typesinternal"
+ "golang.org/x/tools/internal/typesinternal/typeindex"
+)
+
+var StringsBuilderAnalyzer = &analysis.Analyzer{
+ Name: "stringsbuilder",
+ Doc: analysisinternal.MustExtractDoc(doc, "stringsbuilder"),
+ Requires: []*analysis.Analyzer{
+ generated.Analyzer,
+ inspect.Analyzer,
+ typeindexanalyzer.Analyzer,
+ },
+ Run: stringsbuilder,
+ URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#stringsbuilder",
+}
+
+// stringsbuilder replaces string += string in a loop by strings.Builder.
+func stringsbuilder(pass *analysis.Pass) (any, error) {
+ skipGenerated(pass)
+
+ // Skip the analyzer in packages where its
+ // fixes would create an import cycle.
+ if within(pass, "strings", "runtime") {
+ return nil, nil
+ }
+
+ var (
+ inspect = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index)
+ )
+
+ // Gather all local string variables that appear on the
+ // LHS of some string += string assignment.
+ candidates := make(map[*types.Var]bool)
+ for curAssign := range inspect.Root().Preorder((*ast.AssignStmt)(nil)) {
+ assign := curAssign.Node().(*ast.AssignStmt)
+ if assign.Tok == token.ADD_ASSIGN && is[*ast.Ident](assign.Lhs[0]) {
+ if v, ok := pass.TypesInfo.Uses[assign.Lhs[0].(*ast.Ident)].(*types.Var); ok &&
+ !typesinternal.IsPackageLevel(v) && // TODO(adonovan): in go1.25, use v.Kind() == types.LocalVar &&
+ types.Identical(v.Type(), builtinString.Type()) {
+ candidates[v] = true
+ }
+ }
+ }
+
+ // Now check each candidate variable's decl and uses.
+nextcand:
+ for v := range candidates {
+ var edits []analysis.TextEdit
+
+ // Check declaration of s:
+ //
+ // s := expr
+ // var s [string] [= expr]
+ //
+ // and transform to:
+ //
+ // var s strings.Builder; s.WriteString(expr)
+ //
+ def, ok := index.Def(v)
+ if !ok {
+ continue
+ }
+ ek, _ := def.ParentEdge()
+ if ek == edge.AssignStmt_Lhs &&
+ len(def.Parent().Node().(*ast.AssignStmt).Lhs) == 1 {
+ // Have: s := expr
+ // => var s strings.Builder; s.WriteString(expr)
+
+ assign := def.Parent().Node().(*ast.AssignStmt)
+
+ // Reject "if s := f(); ..." since in that context
+ // we can't replace the assign with two statements.
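+ // e.g. reject (illustrative):
+ //
+ // if s := f(); cond { ... s += t ... }
+ //
+ // since "var s strings.Builder; s.WriteString(f())" is two
+ // statements and cannot appear in an if's Init clause.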
+ switch def.Parent().Parent().Node().(type) { + case *ast.BlockStmt, *ast.CaseClause, *ast.CommClause: + // OK: these are the parts of syntax that + // allow unrestricted statement lists. + default: + continue + } + + // Add strings import. + prefix, importEdits := refactor.AddImport( + pass.TypesInfo, astutil.EnclosingFile(def), "strings", "strings", "Builder", v.Pos()) + edits = append(edits, importEdits...) + + if isEmptyString(pass.TypesInfo, assign.Rhs[0]) { + // s := "" + // --------------------- + // var s strings.Builder + edits = append(edits, analysis.TextEdit{ + Pos: assign.Pos(), + End: assign.End(), + NewText: fmt.Appendf(nil, "var %[1]s %[2]sBuilder", v.Name(), prefix), + }) + + } else { + // s := expr + // ------------------------------------- - + // var s strings.Builder; s.WriteString(expr) + edits = append(edits, []analysis.TextEdit{ + { + Pos: assign.Pos(), + End: assign.Rhs[0].Pos(), + NewText: fmt.Appendf(nil, "var %[1]s %[2]sBuilder; %[1]s.WriteString(", v.Name(), prefix), + }, + { + Pos: assign.End(), + End: assign.End(), + NewText: []byte(")"), + }, + }...) + + } + + } else if ek == edge.ValueSpec_Names && + len(def.Parent().Node().(*ast.ValueSpec).Names) == 1 { + // Have: var s [string] [= expr] + // => var s strings.Builder; s.WriteString(expr) + + // Add strings import. + prefix, importEdits := refactor.AddImport( + pass.TypesInfo, astutil.EnclosingFile(def), "strings", "strings", "Builder", v.Pos()) + edits = append(edits, importEdits...) + + spec := def.Parent().Node().(*ast.ValueSpec) + decl := def.Parent().Parent().Node().(*ast.GenDecl) + + init := spec.Names[0].End() // start of " = expr" + if spec.Type != nil { + init = spec.Type.End() + } + + // var s [string] + // ---------------- + // var s strings.Builder + edits = append(edits, analysis.TextEdit{ + Pos: spec.Names[0].End(), + End: init, + NewText: fmt.Appendf(nil, " %sBuilder", prefix), + }) + + if len(spec.Values) > 0 && !isEmptyString(pass.TypesInfo, spec.Values[0]) { + // = expr + // ---------------- - + // ; s.WriteString(expr) + edits = append(edits, []analysis.TextEdit{ + { + Pos: init, + End: spec.Values[0].Pos(), + NewText: fmt.Appendf(nil, "; %s.WriteString(", v.Name()), + }, + { + Pos: decl.End(), + End: decl.End(), + NewText: []byte(")"), + }, + }...) + } else { + // delete "= expr" + edits = append(edits, analysis.TextEdit{ + Pos: init, + End: spec.End(), + }) + } + + } else { + continue + } + + // Check uses of s. + // + // - All uses of s except the final one must be of the form + // + // s += expr + // + // Each of these will become s.WriteString(expr). + // At least one of them must be in an intervening loop + // w.r.t. the declaration of s: + // + // var s string + // for ... { s += expr } + // + // - The final use of s must be as an rvalue (e.g. use(s), not &s). + // This will become s.String(). + // + // Perhaps surprisingly, it is fine for there to be an + // intervening loop or lambda w.r.t. the declaration of s: + // + // var s strings.Builder + // for range kSmall { s.WriteString(expr) } + // for range kLarge { use(s.String()) } // called repeatedly + // + // Even though that might cause the s.String() operation to be + // executed repeatedly, this is not a deoptimization because, + // by design, (*strings.Builder).String does not allocate. 
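+ // Illustrative shape of an accepted candidate (hypothetical):
+ //
+ // s := "" // becomes: var s strings.Builder
+ // for _, x := range xs { s += g(x) } // becomes: s.WriteString(g(x))
+ // return s // becomes: return s.String()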
+		var (
+			numLoopAssigns int             // number of += assignments within a loop
+			loopAssign     *ast.AssignStmt // first += assignment within a loop
+			seenRvalueUse  bool            // => we've seen the sole final use of s as an rvalue
+		)
+		for curUse := range index.Uses(v) {
+			// Strip enclosing parens around Ident.
+			ek, _ := curUse.ParentEdge()
+			for ek == edge.ParenExpr_X {
+				curUse = curUse.Parent()
+				ek, _ = curUse.ParentEdge()
+			}
+
+			// The rvalueUse must be the lexically last use.
+			if seenRvalueUse {
+				continue nextcand
+			}
+
+			// intervening reports whether cur has an ancestor of
+			// one of the given types that is within the scope of v.
+			intervening := func(types ...ast.Node) bool {
+				for cur := range curUse.Enclosing(types...) {
+					if v.Pos() <= cur.Node().Pos() { // in scope of v
+						return true
+					}
+				}
+				return false
+			}
+
+			if ek == edge.AssignStmt_Lhs {
+				assign := curUse.Parent().Node().(*ast.AssignStmt)
+				if assign.Tok != token.ADD_ASSIGN {
+					continue nextcand
+				}
+				// Have: s += expr
+
+				// At least one of the += operations must appear
+				// within a loop relative to the declaration of s.
+				if intervening((*ast.ForStmt)(nil), (*ast.RangeStmt)(nil)) {
+					numLoopAssigns++
+					if loopAssign == nil {
+						loopAssign = assign
+					}
+				}
+
+				// s += expr
+				// ------------- -
+				// s.WriteString(expr)
+				edits = append(edits, []analysis.TextEdit{
+					// replace += with .WriteString()
+					{
+						Pos:     assign.TokPos,
+						End:     assign.Rhs[0].Pos(),
+						NewText: []byte(".WriteString("),
+					},
+					// insert ")"
+					{
+						Pos:     assign.End(),
+						End:     assign.End(),
+						NewText: []byte(")"),
+					},
+				}...)
+
+			} else if ek == edge.UnaryExpr_X &&
+				curUse.Parent().Node().(*ast.UnaryExpr).Op == token.AND {
+				// Have: use(&s)
+				continue nextcand // s is used as an lvalue; reject
+
+			} else {
+				// The only possible l-value uses of a string variable
+				// are assignments (s=expr, s+=expr, etc) and &s.
+				// (For strings, we can ignore method calls s.m().)
+				// All other uses are r-values.
+				seenRvalueUse = true
+
+				edits = append(edits, analysis.TextEdit{
+					// insert ".String()"
+					Pos:     curUse.Node().End(),
+					End:     curUse.Node().End(),
+					NewText: []byte(".String()"),
+				})
+			}
+		}
+		if !seenRvalueUse {
+			continue nextcand // no rvalue use; reject
+		}
+		if numLoopAssigns == 0 {
+			continue nextcand // no += in a loop; reject
+		}
+
+		pass.Report(analysis.Diagnostic{
+			Pos:     loopAssign.Pos(),
+			End:     loopAssign.End(),
+			Message: "using string += string in a loop is inefficient",
+			SuggestedFixes: []analysis.SuggestedFix{{
+				Message:   "Replace string += string with strings.Builder",
+				TextEdits: edits,
+			}},
+		})
+	}
+
+	return nil, nil
+}
+
+// isEmptyString reports whether e (a string-typed expression) has constant value "".
+func isEmptyString(info *types.Info, e ast.Expr) bool {
+	tv, ok := info.Types[e]
+	return ok && tv.Value != nil && constant.StringVal(tv.Value) == ""
+}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringscutprefix.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringscutprefix.go
new file mode 100644
index 00000000000..9e76f953ed5
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringscutprefix.go
@@ -0,0 +1,261 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modernize
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+	"strings"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/inspect"
+	"golang.org/x/tools/go/ast/inspector"
+	"golang.org/x/tools/go/types/typeutil"
+	"golang.org/x/tools/internal/analysisinternal"
+	"golang.org/x/tools/internal/analysisinternal/generated"
+	typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex"
+	"golang.org/x/tools/internal/astutil"
+	"golang.org/x/tools/internal/refactor"
+	"golang.org/x/tools/internal/typesinternal"
+	"golang.org/x/tools/internal/typesinternal/typeindex"
+)
+
+var StringsCutPrefixAnalyzer = &analysis.Analyzer{
+	Name: "stringscutprefix",
+	Doc:  analysisinternal.MustExtractDoc(doc, "stringscutprefix"),
+	Requires: []*analysis.Analyzer{
+		generated.Analyzer,
+		inspect.Analyzer,
+		typeindexanalyzer.Analyzer,
+	},
+	Run: stringscutprefix,
+	URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#stringscutprefix",
+}
+
+// stringscutprefix offers a fix to replace an if statement that matches
+// one of the two patterns below with strings.CutPrefix or strings.CutSuffix.
+//
+// Patterns:
+//
+//  1. if strings.HasPrefix(s, pre) { use(strings.TrimPrefix(s, pre)) }
+//     =>
+//     if after, ok := strings.CutPrefix(s, pre); ok { use(after) }
+//
+//  2. if after := strings.TrimPrefix(s, pre); after != s { use(after) }
+//     =>
+//     if after, ok := strings.CutPrefix(s, pre); ok { use(after) }
+//
+// Similar patterns apply for CutSuffix.
+//
+// The use must occur within the first statement of the block, and the offered fix
+// only replaces the first occurrence of strings.TrimPrefix/TrimSuffix.
+//
+// Variants:
+//   - bytes.HasPrefix/HasSuffix usage, as in pattern 1.
+func stringscutprefix(pass *analysis.Pass) (any, error) {
+	skipGenerated(pass)
+
+	var (
+		inspect = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+		index   = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index)
+		info    = pass.TypesInfo
+
+		stringsTrimPrefix = index.Object("strings", "TrimPrefix")
+		bytesTrimPrefix   = index.Object("bytes", "TrimPrefix")
+		stringsTrimSuffix = index.Object("strings", "TrimSuffix")
+		bytesTrimSuffix   = index.Object("bytes", "TrimSuffix")
+	)
+	if !index.Used(stringsTrimPrefix, bytesTrimPrefix, stringsTrimSuffix, bytesTrimSuffix) {
+		return nil, nil
+	}
+
+	for curFile := range filesUsing(inspect, pass.TypesInfo, "go1.20") {
+		for curIfStmt := range curFile.Preorder((*ast.IfStmt)(nil)) {
+			ifStmt := curIfStmt.Node().(*ast.IfStmt)
+
+			// pattern1
+			if call, ok := ifStmt.Cond.(*ast.CallExpr); ok && ifStmt.Init == nil && len(ifStmt.Body.List) > 0 {
+
+				obj := typeutil.Callee(info, call)
+				if !typesinternal.IsFunctionNamed(obj, "strings", "HasPrefix", "HasSuffix") &&
+					!typesinternal.IsFunctionNamed(obj, "bytes", "HasPrefix", "HasSuffix") {
+					continue
+				}
+				isPrefix := strings.HasSuffix(obj.Name(), "Prefix")
+
+				// Replace the first occurrence of strings.TrimPrefix(s, pre) in the first
+				// statement only, not in later statements, in case s or pre are modified
+				// by intervening logic (ditto Suffix).
+				firstStmt := curIfStmt.Child(ifStmt.Body).Child(ifStmt.Body.List[0])
+				for curCall := range firstStmt.Preorder((*ast.CallExpr)(nil)) {
+					call1 := curCall.Node().(*ast.CallExpr)
+					obj1 := typeutil.Callee(info, call1)
+					// bytesTrimPrefix or stringsTrimPrefix may be nil if the package doesn't
+					// import bytes or strings, so we must check that obj1 is non-nil; otherwise
+					// call1 is not a Trim call at all, and treating it as one below would
+					// cause a panic (ditto Suffix).
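+					// For instance (a hypothetical case): in a package that never
+					// imports bytes, bytesTrimPrefix is nil, so a call whose callee
+					// is likewise unresolved would otherwise slip through the
+					// comparison below and panic when its arguments are accessed.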
+					if obj1 == nil ||
+						obj1 != stringsTrimPrefix && obj1 != bytesTrimPrefix &&
+							obj1 != stringsTrimSuffix && obj1 != bytesTrimSuffix {
+						continue
+					}
+
+					isPrefix1 := strings.HasSuffix(obj1.Name(), "Prefix")
+					var cutFuncName, varName, message, fixMessage string
+					if isPrefix && isPrefix1 {
+						cutFuncName = "CutPrefix"
+						varName = "after"
+						message = "HasPrefix + TrimPrefix can be simplified to CutPrefix"
+						fixMessage = "Replace HasPrefix/TrimPrefix with CutPrefix"
+					} else if !isPrefix && !isPrefix1 {
+						cutFuncName = "CutSuffix"
+						varName = "before"
+						message = "HasSuffix + TrimSuffix can be simplified to CutSuffix"
+						fixMessage = "Replace HasSuffix/TrimSuffix with CutSuffix"
+					} else {
+						continue
+					}
+
+					// Have: if strings.HasPrefix(s0, pre0) { ...strings.TrimPrefix(s, pre)... } (ditto Suffix)
+					var (
+						s0   = call.Args[0]
+						pre0 = call.Args[1]
+						s    = call1.Args[0]
+						pre  = call1.Args[1]
+					)
+
+					// Check whether call1 uses exactly the same arguments as
+					// strings.HasPrefix. Shadowed variables are not a concern,
+					// since we inspect only the first statement of the block
+					// (ditto Suffix).
+					if astutil.EqualSyntax(s0, s) && astutil.EqualSyntax(pre0, pre) {
+						after := refactor.FreshName(info.Scopes[ifStmt], ifStmt.Pos(), varName)
+						prefix, importEdits := refactor.AddImport(
+							info,
+							curFile.Node().(*ast.File),
+							obj1.Pkg().Name(),
+							obj1.Pkg().Path(),
+							cutFuncName,
+							call.Pos(),
+						)
+						okVarName := refactor.FreshName(info.Scopes[ifStmt], ifStmt.Pos(), "ok")
+						pass.Report(analysis.Diagnostic{
+							// highlight at the HasPrefix call (ditto Suffix).
+							Pos:     call.Pos(),
+							End:     call.End(),
+							Message: message,
+							SuggestedFixes: []analysis.SuggestedFix{{
+								Message: fixMessage,
+								// if strings.HasPrefix(s, pre) { use(strings.TrimPrefix(s, pre)) }
+								// ------------ ----------------- ----- --------------------------
+								// if after, ok := strings.CutPrefix(s, pre); ok { use(after) }
+								// (ditto Suffix)
+								TextEdits: append(importEdits, []analysis.TextEdit{
+									{
+										Pos:     call.Fun.Pos(),
+										End:     call.Fun.Pos(),
+										NewText: fmt.Appendf(nil, "%s, %s :=", after, okVarName),
+									},
+									{
+										Pos:     call.Fun.Pos(),
+										End:     call.Fun.End(),
+										NewText: fmt.Appendf(nil, "%s%s", prefix, cutFuncName),
+									},
+									{
+										Pos:     call.End(),
+										End:     call.End(),
+										NewText: fmt.Appendf(nil, "; %s ", okVarName),
+									},
+									{
+										Pos:     call1.Pos(),
+										End:     call1.End(),
+										NewText: []byte(after),
+									},
+								}...),
+							}}},
+						)
+						break
+					}
+				}
+			}
+
+			// pattern2
+			if bin, ok := ifStmt.Cond.(*ast.BinaryExpr); ok &&
+				bin.Op == token.NEQ &&
+				ifStmt.Init != nil &&
+				isSimpleAssign(ifStmt.Init) {
+				assign := ifStmt.Init.(*ast.AssignStmt)
+				if call, ok := assign.Rhs[0].(*ast.CallExpr); ok && assign.Tok == token.DEFINE {
+					lhs := assign.Lhs[0]
+					obj := typeutil.Callee(info, call)
+
+					if obj == nil ||
+						obj != stringsTrimPrefix && obj != bytesTrimPrefix && obj != stringsTrimSuffix && obj != bytesTrimSuffix {
+						continue
+					}
+
+					isPrefix1 := strings.HasSuffix(obj.Name(), "Prefix")
+					var cutFuncName, message, fixMessage string
+					if isPrefix1 {
+						cutFuncName = "CutPrefix"
+						message = "TrimPrefix can be simplified to CutPrefix"
+						fixMessage = "Replace TrimPrefix with CutPrefix"
+					} else {
+						cutFuncName = "CutSuffix"
+						message = "TrimSuffix can be simplified to CutSuffix"
+						fixMessage = "Replace TrimSuffix with CutSuffix"
+					}
+
+					if astutil.EqualSyntax(lhs, bin.X) && astutil.EqualSyntax(call.Args[0], bin.Y) ||
+						(astutil.EqualSyntax(lhs, bin.Y) && astutil.EqualSyntax(call.Args[0], bin.X)) {
+						okVarName := refactor.FreshName(info.Scopes[ifStmt], ifStmt.Pos(), "ok")
+						// Have one of:
+						//	if rest := 
TrimPrefix(s, prefix); rest != s { (ditto Suffix) + // if rest := TrimPrefix(s, prefix); s != rest { (ditto Suffix) + + // We use AddImport not to add an import (since it exists already) + // but to compute the correct prefix in the dot-import case. + prefix, importEdits := refactor.AddImport( + info, + curFile.Node().(*ast.File), + obj.Pkg().Name(), + obj.Pkg().Path(), + cutFuncName, + call.Pos(), + ) + + pass.Report(analysis.Diagnostic{ + // highlight from the init and the condition end. + Pos: ifStmt.Init.Pos(), + End: ifStmt.Cond.End(), + Message: message, + SuggestedFixes: []analysis.SuggestedFix{{ + Message: fixMessage, + // if x := strings.TrimPrefix(s, pre); x != s ... + // ---- ---------- ------ + // if x, ok := strings.CutPrefix (s, pre); ok ... + // (ditto Suffix) + TextEdits: append(importEdits, []analysis.TextEdit{ + { + Pos: assign.Lhs[0].End(), + End: assign.Lhs[0].End(), + NewText: fmt.Appendf(nil, ", %s", okVarName), + }, + { + Pos: call.Fun.Pos(), + End: call.Fun.End(), + NewText: fmt.Appendf(nil, "%s%s", prefix, cutFuncName), + }, + { + Pos: ifStmt.Cond.Pos(), + End: ifStmt.Cond.End(), + NewText: []byte(okVarName), + }, + }...), + }}, + }) + } + } + } + } + } + return nil, nil +} diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringsseq.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringsseq.go new file mode 100644 index 00000000000..ef2b5463640 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringsseq.go @@ -0,0 +1,145 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modernize + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/edge" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysisinternal/generated" + typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/typesinternal/typeindex" +) + +var StringsSeqAnalyzer = &analysis.Analyzer{ + Name: "stringsseq", + Doc: analysisinternal.MustExtractDoc(doc, "stringsseq"), + Requires: []*analysis.Analyzer{ + generated.Analyzer, + inspect.Analyzer, + typeindexanalyzer.Analyzer, + }, + Run: stringsseq, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#stringsseq", +} + +// stringsseq offers a fix to replace a call to strings.Split with +// SplitSeq or strings.Fields with FieldsSeq +// when it is the operand of a range loop, either directly: +// +// for _, line := range strings.Split() {...} +// +// or indirectly, if the variable's sole use is the range statement: +// +// lines := strings.Split() +// for _, line := range lines {...} +// +// Variants: +// - bytes.SplitSeq +// - bytes.FieldsSeq +func stringsseq(pass *analysis.Pass) (any, error) { + skipGenerated(pass) + + var ( + inspect = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + info = pass.TypesInfo + + stringsSplit = index.Object("strings", "Split") + stringsFields = index.Object("strings", "Fields") + bytesSplit = index.Object("bytes", "Split") + bytesFields = index.Object("bytes", "Fields") + ) + if !index.Used(stringsSplit, stringsFields, bytesSplit, bytesFields) { + return 
nil, nil + } + + for curFile := range filesUsing(inspect, info, "go1.24") { + for curRange := range curFile.Preorder((*ast.RangeStmt)(nil)) { + rng := curRange.Node().(*ast.RangeStmt) + + // Reject "for i, line := ..." since SplitSeq is not an iter.Seq2. + // (We require that i is blank.) + if id, ok := rng.Key.(*ast.Ident); ok && id.Name != "_" { + continue + } + + // Find the call operand of the range statement, + // whether direct or indirect. + call, ok := rng.X.(*ast.CallExpr) + if !ok { + if id, ok := rng.X.(*ast.Ident); ok { + if v, ok := info.Uses[id].(*types.Var); ok { + if ek, idx := curRange.ParentEdge(); ek == edge.BlockStmt_List && idx > 0 { + curPrev, _ := curRange.PrevSibling() + if assign, ok := curPrev.Node().(*ast.AssignStmt); ok && + assign.Tok == token.DEFINE && + len(assign.Lhs) == 1 && + len(assign.Rhs) == 1 && + info.Defs[assign.Lhs[0].(*ast.Ident)] == v && + soleUseIs(index, v, id) { + // Have: + // lines := ... + // for _, line := range lines {...} + // and no other uses of lines. + call, _ = assign.Rhs[0].(*ast.CallExpr) + } + } + } + } + } + + if call != nil { + var edits []analysis.TextEdit + if rng.Key != nil { + // Delete (blank) RangeStmt.Key: + // for _, line := -> for line := + // for _, _ := -> for + // for _ := -> for + end := rng.Range + if rng.Value != nil { + end = rng.Value.Pos() + } + edits = append(edits, analysis.TextEdit{ + Pos: rng.Key.Pos(), + End: end, + }) + } + + sel, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + continue + } + + switch obj := typeutil.Callee(info, call); obj { + case stringsSplit, stringsFields, bytesSplit, bytesFields: + oldFnName := obj.Name() + seqFnName := fmt.Sprintf("%sSeq", oldFnName) + pass.Report(analysis.Diagnostic{ + Pos: sel.Pos(), + End: sel.End(), + Message: fmt.Sprintf("Ranging over %s is more efficient", seqFnName), + SuggestedFixes: []analysis.SuggestedFix{{ + Message: fmt.Sprintf("Replace %s with %s", oldFnName, seqFnName), + TextEdits: append(edits, analysis.TextEdit{ + Pos: sel.Sel.Pos(), + End: sel.Sel.End(), + NewText: []byte(seqFnName)}), + }}, + }) + } + } + } + } + return nil, nil +} diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/testingcontext.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/testingcontext.go new file mode 100644 index 00000000000..558cf142dd6 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/testingcontext.go @@ -0,0 +1,253 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package modernize
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+	"go/types"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/inspect"
+	"golang.org/x/tools/go/ast/edge"
+	"golang.org/x/tools/go/types/typeutil"
+	"golang.org/x/tools/internal/analysisinternal"
+	"golang.org/x/tools/internal/analysisinternal/generated"
+	typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex"
+	"golang.org/x/tools/internal/astutil"
+	"golang.org/x/tools/internal/typesinternal"
+	"golang.org/x/tools/internal/typesinternal/typeindex"
+)
+
+var TestingContextAnalyzer = &analysis.Analyzer{
+	Name: "testingcontext",
+	Doc:  analysisinternal.MustExtractDoc(doc, "testingcontext"),
+	Requires: []*analysis.Analyzer{
+		generated.Analyzer,
+		inspect.Analyzer,
+		typeindexanalyzer.Analyzer,
+	},
+	Run: testingContext,
+	URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#testingcontext",
+}
+
+// The testingContext pass replaces calls to context.WithCancel from within
+// tests with a use of testing.{T,B,F}.Context(), added in Go 1.24.
+//
+// Specifically, the testingContext pass suggests replacing:
+//
+//	ctx, cancel := context.WithCancel(context.Background()) // or context.TODO
+//	defer cancel()
+//
+// with:
+//
+//	ctx := t.Context()
+//
+// provided:
+//
+//   - ctx and cancel are declared by the assignment
+//   - the deferred call is the only use of cancel
+//   - the call is within a test or subtest function
+//   - the relevant testing.{T,B,F} is named and not shadowed at the call
func testingContext(pass *analysis.Pass) (any, error) {
+	skipGenerated(pass)
+
+	var (
+		index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index)
+		info  = pass.TypesInfo
+
+		contextWithCancel = index.Object("context", "WithCancel")
+	)
+
+calls:
+	for cur := range index.Calls(contextWithCancel) {
+		call := cur.Node().(*ast.CallExpr)
+		// Have: context.WithCancel(...)
+
+		arg, ok := call.Args[0].(*ast.CallExpr)
+		if !ok {
+			continue
+		}
+		if !typesinternal.IsFunctionNamed(typeutil.Callee(info, arg), "context", "Background", "TODO") {
+			continue
+		}
+		// Have: context.WithCancel(context.{Background,TODO}())
+
+		parent := cur.Parent()
+		assign, ok := parent.Node().(*ast.AssignStmt)
+		if !ok || assign.Tok != token.DEFINE {
+			continue
+		}
+		// Have: a, b := context.WithCancel(context.{Background,TODO}())
+
+		// Check that both a and b are declared, not redeclarations.
+		var lhs []types.Object
+		for _, expr := range assign.Lhs {
+			id, ok := expr.(*ast.Ident)
+			if !ok {
+				continue calls
+			}
+			obj, ok := info.Defs[id]
+			if !ok {
+				continue calls
+			}
+			lhs = append(lhs, obj)
+		}
+
+		next, ok := parent.NextSibling()
+		if !ok {
+			continue
+		}
+		defr, ok := next.Node().(*ast.DeferStmt)
+		if !ok {
+			continue
+		}
+		deferId, ok := defr.Call.Fun.(*ast.Ident)
+		if !ok || !soleUseIs(index, lhs[1], deferId) {
+			continue // b is used elsewhere
+		}
+		// Have:
+		//   a, b := context.WithCancel(context.{Background,TODO}())
+		//   defer b()
+
+		// Check that we are in a test func.
+		var testObj types.Object // relevant testing.{T,B,F}, or nil
+		if curFunc, ok := enclosingFunc(cur); ok {
+			switch n := curFunc.Node().(type) {
+			case *ast.FuncLit:
+				if ek, idx := curFunc.ParentEdge(); ek == edge.CallExpr_Args && idx == 1 {
+					// Have: call(..., func(...) { ...context.WithCancel(...)... })
+					obj := typeutil.Callee(info, curFunc.Parent().Node().(*ast.CallExpr))
+					if (typesinternal.IsMethodNamed(obj, "testing", "T", "Run") ||
+						typesinternal.IsMethodNamed(obj, "testing", "B", "Run")) &&
+						len(n.Type.Params.List[0].Names) == 1 {
+
+						// Have: tb.Run(..., func(..., tb *testing.[TB]) { ...context.WithCancel(...)... })
+						testObj = info.Defs[n.Type.Params.List[0].Names[0]]
+					}
+				}
+
+			case *ast.FuncDecl:
+				testObj = isTestFn(info, n)
+			}
+		}
+		if testObj != nil && fileUses(info, astutil.EnclosingFile(cur), "go1.24") {
+			// Have a test function. Check that we can resolve the relevant
+			// testing.{T,B,F} at the current position.
+			if _, obj := lhs[0].Parent().LookupParent(testObj.Name(), lhs[0].Pos()); obj == testObj {
+				pass.Report(analysis.Diagnostic{
+					Pos:     call.Fun.Pos(),
+					End:     call.Fun.End(),
+					Message: fmt.Sprintf("context.WithCancel can be modernized using %s.Context", testObj.Name()),
+					SuggestedFixes: []analysis.SuggestedFix{{
+						Message: fmt.Sprintf("Replace context.WithCancel with %s.Context", testObj.Name()),
+						TextEdits: []analysis.TextEdit{{
+							Pos:     assign.Pos(),
+							End:     defr.End(),
+							NewText: fmt.Appendf(nil, "%s := %s.Context()", lhs[0].Name(), testObj.Name()),
+						}},
+					}},
+				})
+			}
+		}
+	}
+	return nil, nil
+}
+
+// soleUseIs reports whether id is the sole Ident that uses obj.
+// (It returns false if there were no uses of obj.)
+func soleUseIs(index *typeindex.Index, obj types.Object, id *ast.Ident) bool {
+	empty := true
+	for use := range index.Uses(obj) {
+		empty = false
+		if use.Node() != id {
+			return false
+		}
+	}
+	return !empty
+}
+
+// isTestFn checks whether fn is a test function (TestX, BenchmarkX, FuzzX),
+// returning the corresponding types.Object of the *testing.{T,B,F} argument.
+// Returns nil if fn is a test function, but the testing.{T,B,F} argument is
+// unnamed (or _).
+//
+// TODO(rfindley): consider handling the case of an unnamed argument, by adding
+// an edit to give the argument a name.
+//
+// Adapted from go/analysis/passes/tests.
+// TODO(rfindley): consider refactoring to share logic.
+func isTestFn(info *types.Info, fn *ast.FuncDecl) types.Object {
+	// Want functions with 0 results and 1 parameter.
+	if fn.Type.Results != nil && len(fn.Type.Results.List) > 0 ||
+		fn.Type.Params == nil ||
+		len(fn.Type.Params.List) != 1 ||
+		len(fn.Type.Params.List[0].Names) != 1 {
+
+		return nil
+	}
+
+	prefix := testKind(fn.Name.Name)
+	if prefix == "" {
+		return nil
+	}
+
+	if tparams := fn.Type.TypeParams; tparams != nil && len(tparams.List) > 0 {
+		return nil // test functions must not be generic
+	}
+
+	obj := info.Defs[fn.Type.Params.List[0].Names[0]]
+	if obj == nil {
+		return nil // e.g. _ *testing.T
+	}
+
+	var name string
+	switch prefix {
+	case "Test":
+		name = "T"
+	case "Benchmark":
+		name = "B"
+	case "Fuzz":
+		name = "F"
+	}
+
+	if !typesinternal.IsPointerToNamed(obj.Type(), "testing", name) {
+		return nil
+	}
+	return obj
+}
+
+// testKind returns "Test", "Benchmark", or "Fuzz" if name is a valid test,
+// benchmark, or fuzz function name, respectively. Otherwise, testKind
+// returns "".
+//
+// Adapted from go/analysis/passes/tests.isTestName.
+func testKind(name string) string {
+	var prefix string
+	switch {
+	case strings.HasPrefix(name, "Test"):
+		prefix = "Test"
+	case strings.HasPrefix(name, "Benchmark"):
+		prefix = "Benchmark"
+	case strings.HasPrefix(name, "Fuzz"):
+		prefix = "Fuzz"
+	}
+	if prefix == "" {
+		return ""
+	}
+	suffix := name[len(prefix):]
+	if len(suffix) == 0 {
+		// "Test" is ok.
+		return prefix
+	}
+	r, _ := utf8.DecodeRuneInString(suffix)
+	if unicode.IsLower(r) {
+		return ""
+	}
+	return prefix
+}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/waitgroup.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/waitgroup.go
new file mode 100644
index 00000000000..b890f334ba1
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/waitgroup.go
@@ -0,0 +1,175 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modernize
+
+import (
+	"bytes"
+	"fmt"
+	"go/ast"
+	"go/printer"
+	"slices"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/inspect"
+	"golang.org/x/tools/go/types/typeutil"
+	"golang.org/x/tools/internal/analysisinternal"
+	"golang.org/x/tools/internal/analysisinternal/generated"
+	typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex"
+	"golang.org/x/tools/internal/astutil"
+	"golang.org/x/tools/internal/refactor"
+	"golang.org/x/tools/internal/typesinternal/typeindex"
+)
+
+var WaitGroupAnalyzer = &analysis.Analyzer{
+	Name: "waitgroup",
+	Doc:  analysisinternal.MustExtractDoc(doc, "waitgroup"),
+	Requires: []*analysis.Analyzer{
+		generated.Analyzer,
+		inspect.Analyzer,
+		typeindexanalyzer.Analyzer,
+	},
+	Run: waitgroup,
+	URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#waitgroup",
+}
+
+// The waitgroup pass replaces older, more complex code with the
+// WaitGroup.Go API added in go1.25.
+//
+// Patterns:
+//
+//  1. wg.Add(1); go func() { defer wg.Done(); ... }()
+//     =>
+//     wg.Go(func() { ... })
+//
+//  2. wg.Add(1); go func() { ...; wg.Done() }()
+//     =>
+//     wg.Go(func() { ... })
+//
+// The wg.Done call must occur either as a deferred call in the first
+// statement of the block, or as the last statement of the block, and
+// the offered fix removes only that first/last wg.Done call. It
+// doesn't fix existing wrong usage of sync.WaitGroup.
+//
+// The use of WaitGroup.Go in pattern 1 implicitly introduces a
+// 'defer', which may change the behavior in the case of panic from
+// the "..." logic. In this instance, the change is safe: before and
+// after the transformation, an unhandled panic inevitably results in
+// a fatal crash. The fact that the transformed code calls wg.Done()
+// before the crash doesn't materially change anything. (If Done had
+// other effects, or blocked, or if WaitGroup.Go propagated panics
+// from child to parent goroutine, the argument would be different.)
+func waitgroup(pass *analysis.Pass) (any, error) {
+	skipGenerated(pass)
+
+	var (
+		index             = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index)
+		info              = pass.TypesInfo
+		syncWaitGroupAdd  = index.Selection("sync", "WaitGroup", "Add")
+		syncWaitGroupDone = index.Selection("sync", "WaitGroup", "Done")
+	)
+	if !index.Used(syncWaitGroupDone) {
+		return nil, nil
+	}
+
+	for curAddCall := range index.Calls(syncWaitGroupAdd) {
+		// Extract receiver from wg.Add call.
+		addCall := curAddCall.Node().(*ast.CallExpr)
+		if !isIntLiteral(info, addCall.Args[0], 1) {
+			continue // not a call to wg.Add(1)
+		}
+		// Inv: the Args[0] check ensures addCall is not of
+		// the form sync.WaitGroup.Add(&wg, 1).
+		addCallRecv := ast.Unparen(addCall.Fun).(*ast.SelectorExpr).X
+
+		// Following statement must be go func() { ... } ().
+		curAddStmt := curAddCall.Parent()
+		if !is[*ast.ExprStmt](curAddStmt.Node()) {
+			continue // unnecessary parens?
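+			// (Presumably the redundant-parens case: a parenthesized
+			// (wg.Add(1)) makes the call's parent a ParenExpr rather
+			// than an ExprStmt, so the pattern is conservatively skipped.)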
+ } + curNext, ok := curAddCall.Parent().NextSibling() + if !ok { + continue // no successor + } + goStmt, ok := curNext.Node().(*ast.GoStmt) + if !ok { + continue // not a go stmt + } + lit, ok := goStmt.Call.Fun.(*ast.FuncLit) + if !ok || len(goStmt.Call.Args) != 0 { + continue // go argument is not func(){...}() + } + list := lit.Body.List + if len(list) == 0 { + continue + } + + // Body must start with "defer wg.Done()" or end with "wg.Done()". + var doneStmt ast.Stmt + if deferStmt, ok := list[0].(*ast.DeferStmt); ok && + typeutil.Callee(info, deferStmt.Call) == syncWaitGroupDone && + astutil.EqualSyntax(ast.Unparen(deferStmt.Call.Fun).(*ast.SelectorExpr).X, addCallRecv) { + doneStmt = deferStmt // "defer wg.Done()" + + } else if lastStmt, ok := list[len(list)-1].(*ast.ExprStmt); ok { + if doneCall, ok := lastStmt.X.(*ast.CallExpr); ok && + typeutil.Callee(info, doneCall) == syncWaitGroupDone && + astutil.EqualSyntax(ast.Unparen(doneCall.Fun).(*ast.SelectorExpr).X, addCallRecv) { + doneStmt = lastStmt // "wg.Done()" + } + } + if doneStmt == nil { + continue + } + curDoneStmt, ok := curNext.FindNode(doneStmt) + if !ok { + panic("can't find Cursor for 'done' statement") + } + + file := astutil.EnclosingFile(curAddCall) + if !fileUses(info, file, "go1.25") { + continue + } + tokFile := pass.Fset.File(file.Pos()) + + var addCallRecvText bytes.Buffer + err := printer.Fprint(&addCallRecvText, pass.Fset, addCallRecv) + if err != nil { + continue // error getting text for the edit + } + + pass.Report(analysis.Diagnostic{ + Pos: addCall.Pos(), + End: goStmt.End(), + Message: "Goroutine creation can be simplified using WaitGroup.Go", + SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Simplify by using WaitGroup.Go", + TextEdits: slices.Concat( + // delete "wg.Add(1)" + refactor.DeleteStmt(tokFile, curAddStmt), + // delete "wg.Done()" or "defer wg.Done()" + refactor.DeleteStmt(tokFile, curDoneStmt), + []analysis.TextEdit{ + // go func() + // ------ + // wg.Go(func() + { + Pos: goStmt.Pos(), + End: goStmt.Call.Pos(), + NewText: fmt.Appendf(nil, "%s.Go(", addCallRecvText.String()), + }, + // ... }() + // - + // ... 
} ) + { + Pos: goStmt.Call.Lparen, + End: goStmt.Call.Rparen, + }, + }, + ), + }}, + }) + } + return nil, nil +} diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go index fa1883b0c34..2b5a7c80378 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go @@ -14,8 +14,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysisinternal" "golang.org/x/tools/internal/typesinternal" ) @@ -24,7 +24,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "nilfunc", - Doc: analysisutil.MustExtractDoc(doc, "nilfunc"), + Doc: analysisinternal.MustExtractDoc(doc, "nilfunc"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/nilfunc", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/doc.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/doc.go index eebf40208d1..f04e4414341 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/doc.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/doc.go @@ -82,6 +82,16 @@ // ... // } // +// A local function may also be inferred as a printf wrapper. If it +// is assigned to a variable, each call made through that variable will +// be checked just like a call to a function: +// +// logf := func(format string, args ...any) { +// message := fmt.Sprintf(format, args...) +// log.Printf("%s: %s", prefix, message) +// } +// logf("%s", 123) // logf format %s has arg 123 of wrong type int +// // # Specifying printf wrappers by flag // // The -funcs flag specifies a comma-separated list of names of diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go index f008eca36fe..910ffe70d7e 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go @@ -18,13 +18,14 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/edge" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" "golang.org/x/tools/internal/analysisinternal" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/fmtstr" "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/typesinternal" "golang.org/x/tools/internal/versions" ) @@ -37,11 +38,11 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "printf", - Doc: analysisutil.MustExtractDoc(doc, "printf"), + Doc: analysisinternal.MustExtractDoc(doc, "printf"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/printf", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, - ResultType: reflect.TypeOf((*Result)(nil)), + ResultType: reflect.TypeFor[*Result](), FactTypes: []analysis.Fact{new(isWrapper)}, } @@ -70,7 +71,7 @@ func (kind Kind) String() string { // Result is the printf analyzer's result type. Clients may query the result // to learn whether a function behaves like fmt.Print or fmt.Printf. 
type Result struct { - funcs map[*types.Func]Kind + funcs map[types.Object]Kind } // Kind reports whether fn behaves like fmt.Print or fmt.Printf. @@ -111,149 +112,210 @@ func (f *isWrapper) String() string { func run(pass *analysis.Pass) (any, error) { res := &Result{ - funcs: make(map[*types.Func]Kind), + funcs: make(map[types.Object]Kind), } - findPrintfLike(pass, res) - checkCalls(pass) + findPrintLike(pass, res) + checkCalls(pass, res) return res, nil } -type printfWrapper struct { - obj *types.Func - fdecl *ast.FuncDecl - format *types.Var - args *types.Var +// A wrapper is a candidate print/printf wrapper function. +// +// We represent functions generally as types.Object, not *Func, so +// that we can analyze anonymous functions such as +// +// printf := func(format string, args ...any) {...}, +// +// representing them by the *types.Var symbol for the local variable +// 'printf'. +type wrapper struct { + obj types.Object // *Func or *Var + curBody inspector.Cursor // for *ast.BlockStmt + format *types.Var // optional "format string" parameter in the Func{Decl,Lit} + args *types.Var // "args ...any" parameter in the Func{Decl,Lit} callers []printfCaller - failed bool // if true, not a printf wrapper } type printfCaller struct { - w *printfWrapper + w *wrapper call *ast.CallExpr } -// maybePrintfWrapper decides whether decl (a declared function) may be a wrapper -// around a fmt.Printf or fmt.Print function. If so it returns a printfWrapper -// function describing the declaration. Later processing will analyze the -// graph of potential printf wrappers to pick out the ones that are true wrappers. -// A function may be a Printf or Print wrapper if its last argument is ...interface{}. -// If the next-to-last argument is a string, then this may be a Printf wrapper. -// Otherwise it may be a Print wrapper. -func maybePrintfWrapper(info *types.Info, decl ast.Decl) *printfWrapper { - // Look for functions with final argument type ...interface{}. - fdecl, ok := decl.(*ast.FuncDecl) - if !ok || fdecl.Body == nil { - return nil - } - fn, ok := info.Defs[fdecl.Name].(*types.Func) - // Type information may be incomplete. - if !ok { - return nil - } - - sig := fn.Type().(*types.Signature) +// formatArgsParams returns the "format string" and "args ...any" +// parameters of a potential print or printf wrapper function. +// (The format is nil in the print-like case.) +func formatArgsParams(sig *types.Signature) (format, args *types.Var) { if !sig.Variadic() { - return nil // not variadic + return nil, nil // not variadic } params := sig.Params() nparams := params.Len() // variadic => nonzero - // Check final parameter is "args ...interface{}". - args := params.At(nparams - 1) - iface, ok := types.Unalias(args.Type().(*types.Slice).Elem()).(*types.Interface) - if !ok || !iface.Empty() { - return nil - } - // Is second last param 'format string'? - var format *types.Var if nparams >= 2 { if p := params.At(nparams - 2); p.Type() == types.Typ[types.String] { format = p } } - return &printfWrapper{ - obj: fn, - fdecl: fdecl, - format: format, - args: args, + // Check final parameter is "args ...any". + // (variadic => slice) + args = params.At(nparams - 1) + iface, ok := types.Unalias(args.Type().(*types.Slice).Elem()).(*types.Interface) + if !ok || !iface.Empty() { + return nil, nil } + + return format, args } -// findPrintfLike scans the entire package to find printf-like functions. 
-func findPrintfLike(pass *analysis.Pass, res *Result) (any, error) { - // Gather potential wrappers and call graph between them. - byObj := make(map[*types.Func]*printfWrapper) - var wrappers []*printfWrapper - for _, file := range pass.Files { - for _, decl := range file.Decls { - w := maybePrintfWrapper(pass.TypesInfo, decl) - if w == nil { - continue +// findPrintLike scans the entire package to find print or printf-like functions. +// When it returns, all such functions have been identified. +func findPrintLike(pass *analysis.Pass, res *Result) { + var ( + inspect = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + info = pass.TypesInfo + ) + + // Pass 1: gather candidate wrapper functions (and populate wrappers). + var ( + wrappers []*wrapper + byObj = make(map[types.Object]*wrapper) + ) + for cur := range inspect.Root().Preorder((*ast.FuncDecl)(nil), (*ast.FuncLit)(nil)) { + var ( + curBody inspector.Cursor // for *ast.BlockStmt + sig *types.Signature + obj types.Object + ) + switch f := cur.Node().(type) { + case *ast.FuncDecl: + // named function or method: + // + // func wrapf(format string, args ...any) {...} + if f.Body != nil { + curBody = cur.ChildAt(edge.FuncDecl_Body, -1) + obj = info.Defs[f.Name] + sig = obj.Type().(*types.Signature) + } + + case *ast.FuncLit: + // anonymous function directly assigned to a variable: + // + // var wrapf = func(format string, args ...any) {...} + // wrapf := func(format string, args ...any) {...} + // wrapf = func(format string, args ...any) {...} + // + // The LHS may also be a struct field x.wrapf or + // an imported var pkg.Wrapf. + // + sig = info.TypeOf(f).(*types.Signature) + curBody = cur.ChildAt(edge.FuncLit_Body, -1) + var lhs ast.Expr + switch ek, idx := cur.ParentEdge(); ek { + case edge.ValueSpec_Values: + curName := cur.Parent().ChildAt(edge.ValueSpec_Names, idx) + lhs = curName.Node().(*ast.Ident) + case edge.AssignStmt_Rhs: + curLhs := cur.Parent().ChildAt(edge.AssignStmt_Lhs, idx) + lhs = curLhs.Node().(ast.Expr) + } + + switch lhs := lhs.(type) { + case *ast.Ident: + // variable: wrapf = func(...) + obj = info.ObjectOf(lhs).(*types.Var) + case *ast.SelectorExpr: + if sel, ok := info.Selections[lhs]; ok { + // struct field: x.wrapf = func(...) + obj = sel.Obj().(*types.Var) + } else { + // imported var: pkg.Wrapf = func(...) + obj = info.Uses[lhs.Sel].(*types.Var) + } + } + } + if obj != nil { + format, args := formatArgsParams(sig) + if args != nil { + // obj (the symbol for a function/method, or variable + // assigned to an anonymous function) is a potential + // print or printf wrapper. + // + // Later processing will analyze the graph of potential + // wrappers and their function bodies to pick out the + // ones that are true wrappers. + w := &wrapper{ + obj: obj, + curBody: curBody, + format: format, // non-nil => printf + args: args, + } + byObj[w.obj] = w + wrappers = append(wrappers, w) } - byObj[w.obj] = w - wrappers = append(wrappers, w) } } - // Walk the graph to figure out which are really printf wrappers. + // Pass 2: scan the body of each wrapper function + // for calls to other printf-like functions. + // + // Also, reject tricky cases where the parameters + // are potentially mutated by AssignStmt or UnaryExpr. + // TODO: Relax these checks; issue 26555. for _, w := range wrappers { - // Scan function for calls that could be to other printf-like functions. 
- ast.Inspect(w.fdecl.Body, func(n ast.Node) bool { - if w.failed { - return false - } + scan: + for cur := range w.curBody.Preorder( + (*ast.AssignStmt)(nil), + (*ast.UnaryExpr)(nil), + (*ast.CallExpr)(nil), + ) { + switch n := cur.Node().(type) { + case *ast.AssignStmt: + // If the wrapper updates format or args + // it is not a simple wrapper. + for _, lhs := range n.Lhs { + if w.format != nil && match(info, lhs, w.format) || + match(info, lhs, w.args) { + break scan + } + } - // TODO: Relax these checks; issue 26555. - if assign, ok := n.(*ast.AssignStmt); ok { - for _, lhs := range assign.Lhs { - if match(pass.TypesInfo, lhs, w.format) || - match(pass.TypesInfo, lhs, w.args) { - // Modifies the format - // string or args in - // some way, so not a - // simple wrapper. - w.failed = true - return false + case *ast.UnaryExpr: + // If the wrapper computes &format or &args, + // it is not a simple wrapper. + if n.Op == token.AND && + (w.format != nil && match(info, n.X, w.format) || + match(info, n.X, w.args)) { + break scan + } + + case *ast.CallExpr: + if len(n.Args) > 0 && match(info, n.Args[len(n.Args)-1], w.args) { + if callee := typeutil.Callee(pass.TypesInfo, n); callee != nil { + + // Call from one wrapper candidate to another? + // Record the edge so that if callee is found to be + // a true wrapper, w will be too. + if w2, ok := byObj[callee]; ok { + w2.callers = append(w2.callers, printfCaller{w, n}) + } + + // Is the candidate a true wrapper, because it calls + // a known print{,f}-like function from the allowlist + // or an imported fact, or another wrapper found + // to be a true wrapper? + // If so, convert all w's callers to kind. + kind := callKind(pass, callee, res) + if kind != KindNone { + checkForward(pass, w, n, kind, res) + } } } } - if un, ok := n.(*ast.UnaryExpr); ok && un.Op == token.AND { - if match(pass.TypesInfo, un.X, w.format) || - match(pass.TypesInfo, un.X, w.args) { - // Taking the address of the - // format string or args, - // so not a simple wrapper. - w.failed = true - return false - } - } - - call, ok := n.(*ast.CallExpr) - if !ok || len(call.Args) == 0 || !match(pass.TypesInfo, call.Args[len(call.Args)-1], w.args) { - return true - } - - fn, kind := printfNameAndKind(pass, call) - if kind != 0 { - checkPrintfFwd(pass, w, call, kind, res) - return true - } - - // If the call is to another function in this package, - // maybe we will find out it is printf-like later. - // Remember this call for later checking. - if fn != nil && fn.Pkg() == pass.Pkg && byObj[fn] != nil { - callee := byObj[fn] - callee.callers = append(callee.callers, printfCaller{w, call}) - } - - return true - }) + } } - return nil, nil } func match(info *types.Info, arg ast.Expr, param *types.Var) bool { @@ -261,9 +323,9 @@ func match(info *types.Info, arg ast.Expr, param *types.Var) bool { return ok && info.ObjectOf(id) == param } -// checkPrintfFwd checks that a printf-forwarding wrapper is forwarding correctly. +// checkForward checks that a forwarding wrapper is forwarding correctly. // It diagnoses writing fmt.Printf(format, args) instead of fmt.Printf(format, args...). 
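+// For example (an illustrative sketch; wrapf is a placeholder name):
+//
+//	func wrapf(format string, args ...any) {
+//		fmt.Printf(format, args) // missing ... in args forwarded to printf-like function
+//	}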
-func checkPrintfFwd(pass *analysis.Pass, w *printfWrapper, call *ast.CallExpr, kind Kind, res *Result) {
+func checkForward(pass *analysis.Pass, w *wrapper, call *ast.CallExpr, kind Kind, res *Result) {
 	matched := kind == KindPrint ||
 		kind != KindNone && len(call.Args) >= 2 && match(pass.TypesInfo, call.Args[len(call.Args)-2], w.format)
 	if !matched {
@@ -292,18 +354,39 @@ func checkPrintfFwd(pass *analysis.Pass, w *printfWrapper, call *ast.CallExpr, k
 		pass.ReportRangef(call, "missing ... in args forwarded to %s-like function", desc)
 		return
 	}
-	fn := w.obj
-	var fact isWrapper
-	if !pass.ImportObjectFact(fn, &fact) {
-		fact.Kind = kind
-		pass.ExportObjectFact(fn, &fact)
-		res.funcs[fn] = kind
+
+	// If the candidate's print{,f} status becomes known,
+	// propagate it back to all its so-far known callers.
+	if res.funcs[w.obj] != kind {
+		res.funcs[w.obj] = kind
+
+		// Export a fact.
+		// (This is a no-op for local symbols.)
+		// We can't export facts on a symbol of another package,
+		// but we can treat the symbol as a wrapper within
+		// the current analysis unit.
+		if w.obj.Pkg() == pass.Pkg {
+			// Facts are associated with origins.
+			pass.ExportObjectFact(origin(w.obj), &isWrapper{Kind: kind})
+		}
+
+		// Propagate kind back to known callers.
 		for _, caller := range w.callers {
-			checkPrintfFwd(pass, caller.w, caller.call, kind, res)
+			checkForward(pass, caller.w, caller.call, kind, res)
 		}
 	}
 }
 
+func origin(obj types.Object) types.Object {
+	switch obj := obj.(type) {
+	case *types.Func:
+		return obj.Origin()
+	case *types.Var:
+		return obj.Origin()
+	}
+	return obj
+}
+
 // isPrint records the print functions.
 // If a key ends in 'f' then it is assumed to be a formatted print.
 //
@@ -412,7 +495,7 @@ func stringConstantExpr(pass *analysis.Pass, expr ast.Expr) (string, bool) {
 
 // checkCalls triggers the print-specific checks for calls that invoke a print
 // function.
-func checkCalls(pass *analysis.Pass) {
+func checkCalls(pass *analysis.Pass, res *Result) {
 	inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
 	nodeFilter := []ast.Node{
 		(*ast.File)(nil),
@@ -426,48 +509,60 @@ func checkCalls(pass *analysis.Pass) {
 			fileVersion = versions.Lang(versions.FileVersion(pass.TypesInfo, n))
 
 		case *ast.CallExpr:
-			fn, kind := printfNameAndKind(pass, n)
-			switch kind {
-			case KindPrintf, KindErrorf:
-				checkPrintf(pass, fileVersion, kind, n, fn.FullName())
-			case KindPrint:
-				checkPrint(pass, n, fn.FullName())
+			if callee := typeutil.Callee(pass.TypesInfo, n); callee != nil {
+				kind := callKind(pass, callee, res)
+				switch kind {
+				case KindPrintf, KindErrorf:
+					checkPrintf(pass, fileVersion, kind, n, fullname(callee))
+				case KindPrint:
+					checkPrint(pass, n, fullname(callee))
+				}
 			}
 		}
 	})
}
 
-func printfNameAndKind(pass *analysis.Pass, call *ast.CallExpr) (fn *types.Func, kind Kind) {
-	fn, _ = typeutil.Callee(pass.TypesInfo, call).(*types.Func)
-	if fn == nil {
-		return nil, 0
+func fullname(obj types.Object) string {
+	if fn, ok := obj.(*types.Func); ok {
+		return fn.FullName()
 	}
+	return obj.Name()
+}
 
-	// Facts are associated with generic declarations, not instantiations.
-	fn = fn.Origin()
-
-	_, ok := isPrint[fn.FullName()]
+// callKind returns the print/printf kind, if any, of the called function obj.
+// (The symbol obj may be a var in the case of an anonymous function.)
+// The result is memoized in res.funcs.
+func callKind(pass *analysis.Pass, obj types.Object, res *Result) Kind {
+	kind, ok := res.funcs[obj]
 	if !ok {
-		// Next look up just "printf", for use with -printf.funcs.
- _, ok = isPrint[strings.ToLower(fn.Name())] - } - if ok { - if fn.FullName() == "fmt.Errorf" { - kind = KindErrorf - } else if strings.HasSuffix(fn.Name(), "f") { - kind = KindPrintf - } else { - kind = KindPrint + // cache miss + _, ok := isPrint[fullname(obj)] + if !ok { + // Next look up just "printf", for use with -printf.funcs. + _, ok = isPrint[strings.ToLower(obj.Name())] } - return fn, kind + if ok { + // well-known printf functions + if fullname(obj) == "fmt.Errorf" { + kind = KindErrorf + } else if strings.HasSuffix(obj.Name(), "f") { + kind = KindPrintf + } else { + kind = KindPrint + } + } else { + // imported wrappers + // Facts are associated with generic declarations, not instantiations. + obj = origin(obj) + var fact isWrapper + if pass.ImportObjectFact(obj, &fact) { + kind = fact.Kind + } + } + res.funcs[obj] = kind // cache } - - var fact isWrapper - if pass.ImportObjectFact(fn, &fact) { - return fn, fact.Kind - } - - return fn, KindNone + return kind } // isFormatter reports whether t could satisfy fmt.Formatter. @@ -490,7 +585,7 @@ func isFormatter(typ types.Type) bool { sig := fn.Type().(*types.Signature) return sig.Params().Len() == 2 && sig.Results().Len() == 0 && - analysisinternal.IsTypeNamed(sig.Params().At(0).Type(), "fmt", "State") && + typesinternal.IsTypeNamed(sig.Params().At(0).Type(), "fmt", "State") && types.Identical(sig.Params().At(1).Type(), types.Typ[types.Rune]) } @@ -729,7 +824,7 @@ func okPrintfArg(pass *analysis.Pass, call *ast.CallExpr, rng analysis.Range, ma if reason != "" { details = " (" + reason + ")" } - pass.ReportRangef(rng, "%s format %s uses non-int %s%s as argument of *", name, operation.Text, analysisinternal.Format(pass.Fset, arg), details) + pass.ReportRangef(rng, "%s format %s uses non-int %s%s as argument of *", name, operation.Text, astutil.Format(pass.Fset, arg), details) return false } } @@ -756,7 +851,7 @@ func okPrintfArg(pass *analysis.Pass, call *ast.CallExpr, rng analysis.Range, ma } arg := call.Args[verbArgIndex] if isFunctionValue(pass, arg) && verb != 'p' && verb != 'T' { - pass.ReportRangef(rng, "%s format %s arg %s is a func value, not called", name, operation.Text, analysisinternal.Format(pass.Fset, arg)) + pass.ReportRangef(rng, "%s format %s arg %s is a func value, not called", name, operation.Text, astutil.Format(pass.Fset, arg)) return false } if reason, ok := matchArgType(pass, v.typ, arg); !ok { @@ -768,14 +863,14 @@ func okPrintfArg(pass *analysis.Pass, call *ast.CallExpr, rng analysis.Range, ma if reason != "" { details = " (" + reason + ")" } - pass.ReportRangef(rng, "%s format %s has arg %s of wrong type %s%s", name, operation.Text, analysisinternal.Format(pass.Fset, arg), typeString, details) + pass.ReportRangef(rng, "%s format %s has arg %s of wrong type %s%s", name, operation.Text, astutil.Format(pass.Fset, arg), typeString, details) return false } // Detect recursive formatting via value's String/Error methods. // The '#' flag suppresses the methods, except with %x, %X, and %q. 
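+	// For example (an illustrative sketch):
+	//
+	//	func (t T) String() string {
+	//		return fmt.Sprintf("value: %v", t) // %v causes recursive String method call
+	//	}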
if v.typ&argString != 0 && v.verb != 'T' && (!strings.Contains(operation.Flags, "#") || strings.ContainsRune("qxX", v.verb)) { if methodName, ok := recursiveStringer(pass, arg); ok { - pass.ReportRangef(rng, "%s format %s with arg %s causes recursive %s method call", name, operation.Text, analysisinternal.Format(pass.Fset, arg), methodName) + pass.ReportRangef(rng, "%s format %s with arg %s causes recursive %s method call", name, operation.Text, astutil.Format(pass.Fset, arg), methodName) return false } } @@ -927,7 +1022,7 @@ func checkPrint(pass *analysis.Pass, call *ast.CallExpr, name string) { if sel, ok := call.Args[0].(*ast.SelectorExpr); ok { if x, ok := sel.X.(*ast.Ident); ok { if x.Name == "os" && strings.HasPrefix(sel.Sel.Name, "Std") { - pass.ReportRangef(call, "%s does not take io.Writer but has first arg %s", name, analysisinternal.Format(pass.Fset, call.Args[0])) + pass.ReportRangef(call, "%s does not take io.Writer but has first arg %s", name, astutil.Format(pass.Fset, call.Args[0])) } } } @@ -961,10 +1056,10 @@ func checkPrint(pass *analysis.Pass, call *ast.CallExpr, name string) { } for _, arg := range args { if isFunctionValue(pass, arg) { - pass.ReportRangef(call, "%s arg %s is a func value, not called", name, analysisinternal.Format(pass.Fset, arg)) + pass.ReportRangef(call, "%s arg %s is a func value, not called", name, astutil.Format(pass.Fset, arg)) } if methodName, ok := recursiveStringer(pass, arg); ok { - pass.ReportRangef(call, "%s arg %s causes recursive call to %s method", name, analysisinternal.Format(pass.Fset, arg), methodName) + pass.ReportRangef(call, "%s arg %s causes recursive call to %s method", name, astutil.Format(pass.Fset, arg), methodName) } } } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go index 57987b3d203..366927326fc 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go @@ -20,7 +20,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typeparams" ) @@ -123,7 +123,7 @@ func checkLongShift(pass *analysis.Pass, node ast.Node, x, y ast.Expr) { } } if amt >= minSize { - ident := analysisinternal.Format(pass.Fset, x) + ident := astutil.Format(pass.Fset, x) qualifier := "" if len(sizes) > 1 { qualifier = "may be " diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go index 78a2fa5ea3b..934f3913c27 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go @@ -18,9 +18,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -29,14 +29,14 @@ var doc string // Analyzer describes sigchanyzer analysis function detector. 
var Analyzer = &analysis.Analyzer{ Name: "sigchanyzer", - Doc: analysisutil.MustExtractDoc(doc, "sigchanyzer"), + Doc: analysisinternal.MustExtractDoc(doc, "sigchanyzer"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/sigchanyzer", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } func run(pass *analysis.Pass) (any, error) { - if !analysisinternal.Imports(pass.Pkg, "os/signal") { + if !typesinternal.Imports(pass.Pkg, "os/signal") { return nil, nil // doesn't directly import signal } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go index c1ac960435d..2cb91c73299 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go @@ -17,10 +17,10 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typesinternal" ) @@ -29,7 +29,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "slog", - Doc: analysisutil.MustExtractDoc(doc, "slog"), + Doc: analysisinternal.MustExtractDoc(doc, "slog"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/slog", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -115,10 +115,10 @@ func run(pass *analysis.Pass) (any, error) { default: if unknownArg == nil { pass.ReportRangef(arg, "%s arg %q should be a string or a slog.Attr (possible missing key or value)", - shortName(fn), analysisinternal.Format(pass.Fset, arg)) + shortName(fn), astutil.Format(pass.Fset, arg)) } else { pass.ReportRangef(arg, "%s arg %q should probably be a string or a slog.Attr (previous arg %q cannot be a key)", - shortName(fn), analysisinternal.Format(pass.Fset, arg), analysisinternal.Format(pass.Fset, unknownArg)) + shortName(fn), astutil.Format(pass.Fset, arg), astutil.Format(pass.Fset, unknownArg)) } // Stop here so we report at most one missing key per call. return @@ -158,7 +158,7 @@ func run(pass *analysis.Pass) (any, error) { } func isAttr(t types.Type) bool { - return analysisinternal.IsTypeNamed(t, "log/slog", "Attr") + return typesinternal.IsTypeNamed(t, "log/slog", "Attr") } // shortName returns a name for the function that is shorter than FullName. 
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go index a0bdf001abd..ca303ae5c15 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go @@ -12,8 +12,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysisinternal" ) //go:embed doc.go @@ -21,7 +21,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "stdmethods", - Doc: analysisutil.MustExtractDoc(doc, "stdmethods"), + Doc: analysisinternal.MustExtractDoc(doc, "stdmethods"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/stdmethods", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go index 7dbff1e4d8d..19c72d2cf93 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go @@ -13,9 +13,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typesinternal" ) @@ -25,7 +25,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "stringintconv", - Doc: analysisutil.MustExtractDoc(doc, "stringintconv"), + Doc: analysisinternal.MustExtractDoc(doc, "stringintconv"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/stringintconv", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -198,7 +198,7 @@ func run(pass *analysis.Pass) (any, error) { // the type has methods, as some {String,GoString,Format} // may change the behavior of fmt.Sprint. 
if len(ttypes) == 1 && len(vtypes) == 1 && types.NewMethodSet(V0).Len() == 0 { - _, prefix, importEdits := analysisinternal.AddImport(pass.TypesInfo, file, "fmt", "fmt", "Sprint", arg.Pos()) + prefix, importEdits := refactor.AddImport(pass.TypesInfo, file, "fmt", "fmt", "Sprint", arg.Pos()) if types.Identical(T0, types.Typ[types.String]) { // string(x) -> fmt.Sprint(x) addFix("Format the number as a decimal", append(importEdits, diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go index 360ba0e74d8..eba4e56bb05 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go @@ -13,7 +13,6 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" "golang.org/x/tools/internal/analysisinternal" @@ -31,7 +30,7 @@ func init() { var Analyzer = &analysis.Analyzer{ Name: "testinggoroutine", - Doc: analysisutil.MustExtractDoc(doc, "testinggoroutine"), + Doc: analysisinternal.MustExtractDoc(doc, "testinggoroutine"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/testinggoroutine", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -40,7 +39,7 @@ var Analyzer = &analysis.Analyzer{ func run(pass *analysis.Pass) (any, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - if !analysisinternal.Imports(pass.Pkg, "testing") { + if !typesinternal.Imports(pass.Pkg, "testing") { return nil, nil } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go index d4e9b025324..a0ed5ab14e8 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go @@ -15,8 +15,8 @@ import ( "unicode/utf8" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -24,7 +24,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "tests", - Doc: analysisutil.MustExtractDoc(doc, "tests"), + Doc: analysisinternal.MustExtractDoc(doc, "tests"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/tests", Run: run, } @@ -258,7 +258,7 @@ func isTestingType(typ types.Type, testingType string) bool { if !ok { return false } - return analysisinternal.IsTypeNamed(ptr.Elem(), "testing", testingType) + return typesinternal.IsTypeNamed(ptr.Elem(), "testing", testingType) } // Validate that fuzz target function's arguments are of accepted types. 
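The stringintconv hunk above illustrates the other recurring change: fixes now obtain imports through refactor.AddImport, which returns a qualification prefix (such as "fmt.") together with the text edits that add the import, and the caller splices both into a suggested fix. A sketch under the assumption that refactor.AddImport keeps the signature used in that hunk (refactor is internal to x/tools; fixStringConv and conv are hypothetical names):

import (
	"go/ast"

	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/internal/refactor"
)

// fixStringConv builds a fix that rewrites string(x) to fmt.Sprint(x),
// where conv is the *ast.CallExpr for the conversion string(x).
func fixStringConv(pass *analysis.Pass, file *ast.File, conv *ast.CallExpr) analysis.SuggestedFix {
	prefix, importEdits := refactor.AddImport(
		pass.TypesInfo, file, "fmt", "fmt", "Sprint", conv.Pos())
	return analysis.SuggestedFix{
		Message: "Format the number as a decimal",
		TextEdits: append(importEdits, analysis.TextEdit{
			Pos:     conv.Fun.Pos(), // the "string" operand of the conversion
			End:     conv.Fun.End(),
			NewText: []byte(prefix + "Sprint"), // prefix is "fmt." unless fmt is dot-imported
		}),
	}
}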
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go index 4fdbb2b5415..45b6822c176 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go @@ -16,10 +16,10 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/typesinternal" ) const badFormat = "2006-02-01" @@ -30,7 +30,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "timeformat", - Doc: analysisutil.MustExtractDoc(doc, "timeformat"), + Doc: analysisinternal.MustExtractDoc(doc, "timeformat"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/timeformat", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -39,7 +39,7 @@ var Analyzer = &analysis.Analyzer{ func run(pass *analysis.Pass) (any, error) { // Note: (time.Time).Format is a method and can be a typeutil.Callee // without directly importing "time". So we cannot just skip this package - // when !analysisutil.Imports(pass.Pkg, "time"). + // when !analysisinternal.Imports(pass.Pkg, "time"). // TODO(taking): Consider using a prepass to collect typeutil.Callees. inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) @@ -50,8 +50,8 @@ func run(pass *analysis.Pass) (any, error) { inspect.Preorder(nodeFilter, func(n ast.Node) { call := n.(*ast.CallExpr) obj := typeutil.Callee(pass.TypesInfo, call) - if !analysisinternal.IsMethodNamed(obj, "time", "Time", "Format") && - !analysisinternal.IsFunctionNamed(obj, "time", "Parse") { + if !typesinternal.IsMethodNamed(obj, "time", "Time", "Format") && + !typesinternal.IsFunctionNamed(obj, "time", "Parse") { return } if len(call.Args) > 0 { diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go index 26e894bd400..4de48c83930 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go @@ -11,9 +11,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysisinternal" "golang.org/x/tools/internal/typesinternal" ) @@ -22,7 +22,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "unmarshal", - Doc: analysisutil.MustExtractDoc(doc, "unmarshal"), + Doc: analysisinternal.MustExtractDoc(doc, "unmarshal"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unmarshal", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -39,7 +39,7 @@ func run(pass *analysis.Pass) (any, error) { // Note: (*"encoding/json".Decoder).Decode, (* "encoding/gob".Decoder).Decode // and (* "encoding/xml".Decoder).Decode are methods and can be a typeutil.Callee // without directly importing their packages. So we cannot just skip this package - // when !analysisutil.Imports(pass.Pkg, "encoding/..."). + // when !analysisinternal.Imports(pass.Pkg, "encoding/..."). 
// TODO(taking): Consider using a prepass to collect typeutil.Callees. inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go index 317f034992b..668a3352998 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go @@ -14,8 +14,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/refactor" ) //go:embed doc.go @@ -23,7 +24,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "unreachable", - Doc: analysisutil.MustExtractDoc(doc, "unreachable"), + Doc: analysisinternal.MustExtractDoc(doc, "unreachable"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unreachable", Requires: []*analysis.Analyzer{inspect.Analyzer}, RunDespiteErrors: true, @@ -188,6 +189,11 @@ func (d *deadState) findDead(stmt ast.Stmt) { case *ast.EmptyStmt: // do not warn about unreachable empty statements default: + var ( + inspect = d.pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + curStmt, _ = inspect.Root().FindNode(stmt) + tokFile = d.pass.Fset.File(stmt.Pos()) + ) // (This call to pass.Report is a frequent source // of diagnostics beyond EOF in a truncated file; // see #71659.) @@ -196,11 +202,8 @@ func (d *deadState) findDead(stmt ast.Stmt) { End: stmt.End(), Message: "unreachable code", SuggestedFixes: []analysis.SuggestedFix{{ - Message: "Remove", - TextEdits: []analysis.TextEdit{{ - Pos: stmt.Pos(), - End: stmt.End(), - }}, + Message: "Remove", + TextEdits: refactor.DeleteStmt(tokFile, curStmt), }}, }) d.reachable = true // silence error about next statement diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go index 57c6da64ff3..24ff723390f 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go @@ -14,9 +14,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -24,7 +24,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "unsafeptr", - Doc: analysisutil.MustExtractDoc(doc, "unsafeptr"), + Doc: analysisinternal.MustExtractDoc(doc, "unsafeptr"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unsafeptr", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -105,7 +105,7 @@ func isSafeUintptr(info *types.Info, x ast.Expr) bool { } switch sel.Sel.Name { case "Pointer", "UnsafeAddr": - if analysisinternal.IsTypeNamed(info.Types[sel.X].Type, "reflect", "Value") { + if typesinternal.IsTypeNamed(info.Types[sel.X].Type, "reflect", "Value") { return true } } @@ -153,5 +153,5 @@ func hasBasicType(info *types.Info, x ast.Expr, kind types.BasicKind) bool { // isReflectHeader reports whether t is reflect.SliceHeader or reflect.StringHeader. 
func isReflectHeader(t types.Type) bool { - return analysisinternal.IsTypeNamed(t, "reflect", "SliceHeader", "StringHeader") + return typesinternal.IsTypeNamed(t, "reflect", "SliceHeader", "StringHeader") } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go index ed4cf7ae0be..57ad4f07699 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go @@ -23,7 +23,6 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" "golang.org/x/tools/internal/analysisinternal" @@ -34,7 +33,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "unusedresult", - Doc: analysisutil.MustExtractDoc(doc, "unusedresult"), + Doc: analysisinternal.MustExtractDoc(doc, "unusedresult"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unusedresult", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go index 14c6986eaba..88e4cc86776 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go @@ -13,10 +13,10 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -24,14 +24,14 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "waitgroup", - Doc: analysisutil.MustExtractDoc(doc, "waitgroup"), + Doc: analysisinternal.MustExtractDoc(doc, "waitgroup"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/waitgroup", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } func run(pass *analysis.Pass) (any, error) { - if !analysisinternal.Imports(pass.Pkg, "sync") { + if !typesinternal.Imports(pass.Pkg, "sync") { return nil, nil // doesn't directly import sync } @@ -44,7 +44,7 @@ func run(pass *analysis.Pass) (any, error) { if push { call := n.(*ast.CallExpr) obj := typeutil.Callee(pass.TypesInfo, call) - if analysisinternal.IsMethodNamed(obj, "sync", "WaitGroup", "Add") && + if typesinternal.IsMethodNamed(obj, "sync", "WaitGroup", "Add") && hasSuffix(stack, wantSuffix) && backindex(stack, 1) == backindex(stack, 2).(*ast.BlockStmt).List[0] { // ExprStmt must be Block's first stmt diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go index 7b805b882bf..b407bc77915 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go @@ -75,7 +75,6 @@ type Config struct { VetxOutput string // where to write file of fact information Stdout string // write stdout (e.g. 
JSON, unified diff) to this file SucceedOnTypecheckFailure bool // obsolete awful hack; see #18395 and below - WarnDiagnostics bool // printing diagnostics should not cause a non-zero exit } // Main is the main function of a vet-like analysis tool that must be @@ -87,18 +86,9 @@ type Config struct { // -V=full describe executable for build caching // foo.cfg perform separate modular analyze on the single // unit described by a JSON config file foo.cfg. -// -// Also, subject to approval of proposal #71859: -// // -fix don't print each diagnostic, apply its first fix // -diff don't apply a fix, print the diff (requires -fix) -// -// Additionally, the environment variable GOVET has the value "vet" or -// "fix" depending on whether the command is being invoked by "go vet", -// to report diagnostics, or "go fix", to apply fixes. This is -// necessary so that callers of Main can select their analyzer suite -// before flag parsing. (Vet analyzers must report real code problems, -// whereas Fix analyzers may fix non-problems such as style issues.) +// -json print diagnostics and fixes in JSON form func Main(analyzers ...*analysis.Analyzer) { progname := filepath.Base(os.Args[0]) log.SetFlags(0) @@ -163,7 +153,7 @@ func Run(configFile string, analyzers []*analysis.Analyzer) { // In VetxOnly mode, the analysis is run only for facts. if !cfg.VetxOnly { - code = processResults(fset, cfg.ID, results, cfg.WarnDiagnostics) + code = processResults(fset, cfg.ID, results) } os.Exit(code) @@ -187,7 +177,7 @@ func readConfig(filename string) (*Config, error) { return cfg, nil } -func processResults(fset *token.FileSet, id string, results []result, warnDiagnostics bool) (exit int) { +func processResults(fset *token.FileSet, id string, results []result) (exit int) { if analysisflags.Fix { // Don't print the diagnostics, // but apply all fixes from the root actions. @@ -236,9 +226,7 @@ func processResults(fset *token.FileSet, id string, results []result, warnDiagno for _, res := range results { for _, diag := range res.diagnostics { analysisflags.PrintPlain(os.Stderr, fset, analysisflags.Context, diag) - if !warnDiagnostics { - exit = 1 - } + exit = 1 } } } diff --git a/src/cmd/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/src/cmd/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go new file mode 100644 index 00000000000..0fb4e7eea81 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -0,0 +1,663 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +// This file defines utilities for working with source positions. + +import ( + "fmt" + "go/ast" + "go/token" + "sort" +) + +// PathEnclosingInterval returns the node that encloses the source +// interval [start, end), and all its ancestors up to the AST root. +// +// The definition of "enclosing" used by this function considers +// additional whitespace abutting a node to be enclosed by it. +// In this example: +// +// z := x + y // add them +// <-A-> +// <----B-----> +// +// the ast.BinaryExpr(+) node is considered to enclose interval B +// even though its [Pos()..End()) is actually only interval A. +// This behaviour makes user interfaces more tolerant of imperfect +// input. +// +// This function treats tokens as nodes, though they are not included +// in the result. e.g. PathEnclosingInterval("+") returns the +// enclosing ast.BinaryExpr("x + y"). 
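+//
+// For example, a tool that has parsed a file f and computed the extent
+// of a selected token (hypothetical positions lo and hi) can recover
+// the innermost enclosing node and its ancestors:
+//
+//	path, exact := astutil.PathEnclosingInterval(f, lo, hi)
+//	innermost := path[0] // e.g. the *ast.BinaryExpr for a selected "+"
+//	_, _ = innermost, exact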
+// +// If start==end, the 1-char interval following start is used instead. +// +// The 'exact' result is true if the interval contains only path[0] +// and perhaps some adjacent whitespace. It is false if the interval +// overlaps multiple children of path[0], or if it contains only +// interior whitespace of path[0]. +// In this example: +// +// z := x + y // add them +// <--C--> <---E--> +// ^ +// D +// +// intervals C, D and E are inexact. C is contained by the +// z-assignment statement, because it spans three of its children (:=, +// x, +). So too is the 1-char interval D, because it contains only +// interior whitespace of the assignment. E is considered interior +// whitespace of the BlockStmt containing the assignment. +// +// The resulting path is never empty; it always contains at least the +// 'root' *ast.File. Ideally PathEnclosingInterval would reject +// intervals that lie wholly or partially outside the range of the +// file, but unfortunately ast.File records only the token.Pos of +// the 'package' keyword, but not of the start of the file itself. +func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) { + // fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging + + // Precondition: node.[Pos..End) and adjoining whitespace contain [start, end). + var visit func(node ast.Node) bool + visit = func(node ast.Node) bool { + path = append(path, node) + + nodePos := node.Pos() + nodeEnd := node.End() + + // fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging + + // Intersect [start, end) with interval of node. + if start < nodePos { + start = nodePos + } + if end > nodeEnd { + end = nodeEnd + } + + // Find sole child that contains [start, end). + children := childrenOf(node) + l := len(children) + for i, child := range children { + // [childPos, childEnd) is unaugmented interval of child. + childPos := child.Pos() + childEnd := child.End() + + // [augPos, augEnd) is whitespace-augmented interval of child. + augPos := childPos + augEnd := childEnd + if i > 0 { + augPos = children[i-1].End() // start of preceding whitespace + } + if i < l-1 { + nextChildPos := children[i+1].Pos() + // Does [start, end) lie between child and next child? + if start >= augEnd && end <= nextChildPos { + return false // inexact match + } + augEnd = nextChildPos // end of following whitespace + } + + // fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n", + // i, augPos, augEnd, start, end) // debugging + + // Does augmented child strictly contain [start, end)? + if augPos <= start && end <= augEnd { + if is[tokenNode](child) { + return true + } + + // childrenOf elides the FuncType node beneath FuncDecl. + // Add it back here for TypeParams, Params, Results, + // all FieldLists). But we don't add it back for the "func" token + // even though it is the tree at FuncDecl.Type.Func. + if decl, ok := node.(*ast.FuncDecl); ok { + if fields, ok := child.(*ast.FieldList); ok && fields != decl.Recv { + path = append(path, decl.Type) + } + } + + return visit(child) + } + + // Does [start, end) overlap multiple children? + // i.e. left-augmented child contains start + // but LR-augmented child does not contain end. + if start < childEnd && end > augEnd { + break + } + } + + // No single child contained [start, end), + // so node is the result. Is it exact? + + // (It's tempting to put this condition before the + // child loop, but it gives the wrong result in the + // case where a node (e.g. 
ExprStmt) and its sole + child have equal intervals.) + if start == nodePos && end == nodeEnd { + return true // exact match + } + + return false // inexact: overlaps multiple children + } + + // Ensure [start,end) is nondecreasing. + if start > end { + start, end = end, start + } + + if start < root.End() && end > root.Pos() { + if start == end { + end = start + 1 // empty interval => interval of size 1 + } + exact = visit(root) + + // Reverse the path: + for i, l := 0, len(path); i < l/2; i++ { + path[i], path[l-1-i] = path[l-1-i], path[i] + } + } else { + // Selection lies within whitespace preceding the + // first (or following the last) declaration in the file. + // The result nonetheless always includes the ast.File. + path = append(path, root) + } + + return +} + +// tokenNode is a dummy implementation of ast.Node for a single token. +// Such nodes are used transiently by PathEnclosingInterval but never escape +// this package. +type tokenNode struct { + pos token.Pos + end token.Pos +} + +func (n tokenNode) Pos() token.Pos { + return n.pos +} + +func (n tokenNode) End() token.Pos { + return n.end +} + +func tok(pos token.Pos, len int) ast.Node { + return tokenNode{pos, pos + token.Pos(len)} +} + +// childrenOf returns the direct non-nil children of ast.Node n. +// It may include fake ast.Node implementations for bare tokens. +// It is not safe to call (e.g.) ast.Walk on such nodes. +func childrenOf(n ast.Node) []ast.Node { + var children []ast.Node + + // First add nodes for all true subtrees. + ast.Inspect(n, func(node ast.Node) bool { + if node == n { // push n + return true // recur + } + if node != nil { // push child + children = append(children, node) + } + return false // no recursion + }) + + // TODO(adonovan): be more careful about missing (!Pos.Valid) + // tokens in trees produced from invalid input. + + // Then add fake Nodes for bare tokens.
+ switch n := n.(type) { + case *ast.ArrayType: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Elt.End(), len("]"))) + + case *ast.AssignStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.BasicLit: + children = append(children, + tok(n.ValuePos, len(n.Value))) + + case *ast.BinaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.BlockStmt: + if n.Lbrace.IsValid() { + children = append(children, tok(n.Lbrace, len("{"))) + } + if n.Rbrace.IsValid() { + children = append(children, tok(n.Rbrace, len("}"))) + } + + case *ast.BranchStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.CallExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + if n.Ellipsis != 0 { + children = append(children, tok(n.Ellipsis, len("..."))) + } + + case *ast.CaseClause: + if n.List == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.ChanType: + switch n.Dir { + case ast.RECV: + children = append(children, tok(n.Begin, len("<-chan"))) + case ast.SEND: + children = append(children, tok(n.Begin, len("chan<-"))) + case ast.RECV | ast.SEND: + children = append(children, tok(n.Begin, len("chan"))) + } + + case *ast.CommClause: + if n.Comm == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.Comment: + // nop + + case *ast.CommentGroup: + // nop + + case *ast.CompositeLit: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("{"))) + + case *ast.DeclStmt: + // nop + + case *ast.DeferStmt: + children = append(children, + tok(n.Defer, len("defer"))) + + case *ast.Ellipsis: + children = append(children, + tok(n.Ellipsis, len("..."))) + + case *ast.EmptyStmt: + // nop + + case *ast.ExprStmt: + // nop + + case *ast.Field: + // TODO(adonovan): Field.{Doc,Comment,Tag}? + + case *ast.FieldList: + if n.Opening.IsValid() { + children = append(children, tok(n.Opening, len("("))) + } + if n.Closing.IsValid() { + children = append(children, tok(n.Closing, len(")"))) + } + + case *ast.File: + // TODO test: Doc + children = append(children, + tok(n.Package, len("package"))) + + case *ast.ForStmt: + children = append(children, + tok(n.For, len("for"))) + + case *ast.FuncDecl: + // TODO(adonovan): FuncDecl.Comment? + + // Uniquely, FuncDecl breaks the invariant that + // preorder traversal yields tokens in lexical order: + // in fact, FuncDecl.Recv precedes FuncDecl.Type.Func. + // + // As a workaround, we inline the case for FuncType + // here and order things correctly. + // We also need to insert the elided FuncType just + // before the 'visit' recursion. 
+ // + children = nil // discard ast.Walk(FuncDecl) info subtrees + children = append(children, tok(n.Type.Func, len("func"))) + if n.Recv != nil { + children = append(children, n.Recv) + } + children = append(children, n.Name) + if tparams := n.Type.TypeParams; tparams != nil { + children = append(children, tparams) + } + if n.Type.Params != nil { + children = append(children, n.Type.Params) + } + if n.Type.Results != nil { + children = append(children, n.Type.Results) + } + if n.Body != nil { + children = append(children, n.Body) + } + + case *ast.FuncLit: + // nop + + case *ast.FuncType: + if n.Func != 0 { + children = append(children, + tok(n.Func, len("func"))) + } + + case *ast.GenDecl: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + if n.Lparen != 0 { + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + } + + case *ast.GoStmt: + children = append(children, + tok(n.Go, len("go"))) + + case *ast.Ident: + children = append(children, + tok(n.NamePos, len(n.Name))) + + case *ast.IfStmt: + children = append(children, + tok(n.If, len("if"))) + + case *ast.ImportSpec: + // TODO(adonovan): ImportSpec.{Doc,EndPos}? + + case *ast.IncDecStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.IndexExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.IndexListExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.InterfaceType: + children = append(children, + tok(n.Interface, len("interface"))) + + case *ast.KeyValueExpr: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.LabeledStmt: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.MapType: + children = append(children, + tok(n.Map, len("map"))) + + case *ast.ParenExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.RangeStmt: + children = append(children, + tok(n.For, len("for")), + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.ReturnStmt: + children = append(children, + tok(n.Return, len("return"))) + + case *ast.SelectStmt: + children = append(children, + tok(n.Select, len("select"))) + + case *ast.SelectorExpr: + // nop + + case *ast.SendStmt: + children = append(children, + tok(n.Arrow, len("<-"))) + + case *ast.SliceExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.StarExpr: + children = append(children, tok(n.Star, len("*"))) + + case *ast.StructType: + children = append(children, tok(n.Struct, len("struct"))) + + case *ast.SwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.TypeAssertExpr: + children = append(children, + tok(n.Lparen-1, len(".")), + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.TypeSpec: + // TODO(adonovan): TypeSpec.{Doc,Comment}? + + case *ast.TypeSwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.UnaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.ValueSpec: + // TODO(adonovan): ValueSpec.{Doc,Comment}? + + case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt: + // nop + } + + // TODO(adonovan): opt: merge the logic of ast.Inspect() into + // the switch above so we can make interleaved callbacks for + // both Nodes and Tokens in the right order and avoid the need + // to sort. 
+ sort.Sort(byPos(children)) + + return children +} + +type byPos []ast.Node + +func (sl byPos) Len() int { + return len(sl) +} +func (sl byPos) Less(i, j int) bool { + return sl[i].Pos() < sl[j].Pos() +} +func (sl byPos) Swap(i, j int) { + sl[i], sl[j] = sl[j], sl[i] +} + +// NodeDescription returns a description of the concrete type of n suitable +// for a user interface. +// +// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident, +// StarExpr) we could be much more specific given the path to the AST +// root. Perhaps we should do that. +func NodeDescription(n ast.Node) string { + switch n := n.(type) { + case *ast.ArrayType: + return "array type" + case *ast.AssignStmt: + return "assignment" + case *ast.BadDecl: + return "bad declaration" + case *ast.BadExpr: + return "bad expression" + case *ast.BadStmt: + return "bad statement" + case *ast.BasicLit: + return "basic literal" + case *ast.BinaryExpr: + return fmt.Sprintf("binary %s operation", n.Op) + case *ast.BlockStmt: + return "block" + case *ast.BranchStmt: + switch n.Tok { + case token.BREAK: + return "break statement" + case token.CONTINUE: + return "continue statement" + case token.GOTO: + return "goto statement" + case token.FALLTHROUGH: + return "fall-through statement" + } + case *ast.CallExpr: + if len(n.Args) == 1 && !n.Ellipsis.IsValid() { + return "function call (or conversion)" + } + return "function call" + case *ast.CaseClause: + return "case clause" + case *ast.ChanType: + return "channel type" + case *ast.CommClause: + return "communication clause" + case *ast.Comment: + return "comment" + case *ast.CommentGroup: + return "comment group" + case *ast.CompositeLit: + return "composite literal" + case *ast.DeclStmt: + return NodeDescription(n.Decl) + " statement" + case *ast.DeferStmt: + return "defer statement" + case *ast.Ellipsis: + return "ellipsis" + case *ast.EmptyStmt: + return "empty statement" + case *ast.ExprStmt: + return "expression statement" + case *ast.Field: + // Can be any of these: + // struct {x, y int} -- struct field(s) + // struct {T} -- anon struct field + // interface {I} -- interface embedding + // interface {f()} -- interface method + // func (A) func(B) C -- receiver, param(s), result(s) + return "field/method/parameter" + case *ast.FieldList: + return "field/method/parameter list" + case *ast.File: + return "source file" + case *ast.ForStmt: + return "for loop" + case *ast.FuncDecl: + return "function declaration" + case *ast.FuncLit: + return "function literal" + case *ast.FuncType: + return "function type" + case *ast.GenDecl: + switch n.Tok { + case token.IMPORT: + return "import declaration" + case token.CONST: + return "constant declaration" + case token.TYPE: + return "type declaration" + case token.VAR: + return "variable declaration" + } + case *ast.GoStmt: + return "go statement" + case *ast.Ident: + return "identifier" + case *ast.IfStmt: + return "if statement" + case *ast.ImportSpec: + return "import specification" + case *ast.IncDecStmt: + if n.Tok == token.INC { + return "increment statement" + } + return "decrement statement" + case *ast.IndexExpr: + return "index expression" + case *ast.IndexListExpr: + return "index list expression" + case *ast.InterfaceType: + return "interface type" + case *ast.KeyValueExpr: + return "key/value association" + case *ast.LabeledStmt: + return "statement label" + case *ast.MapType: + return "map type" + case *ast.Package: + return "package" + case *ast.ParenExpr: + return "parenthesized " + NodeDescription(n.X) + case *ast.RangeStmt: + 
return "range loop" + case *ast.ReturnStmt: + return "return statement" + case *ast.SelectStmt: + return "select statement" + case *ast.SelectorExpr: + return "selector" + case *ast.SendStmt: + return "channel send" + case *ast.SliceExpr: + return "slice expression" + case *ast.StarExpr: + return "*-operation" // load/store expr or pointer type + case *ast.StructType: + return "struct type" + case *ast.SwitchStmt: + return "switch statement" + case *ast.TypeAssertExpr: + return "type assertion" + case *ast.TypeSpec: + return "type specification" + case *ast.TypeSwitchStmt: + return "type switch" + case *ast.UnaryExpr: + return fmt.Sprintf("unary %s operation", n.Op) + case *ast.ValueSpec: + return "value specification" + + } + panic(fmt.Sprintf("unexpected node type: %T", n)) +} + +func is[T any](x any) bool { + _, ok := x.(T) + return ok +} diff --git a/src/cmd/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/src/cmd/vendor/golang.org/x/tools/go/ast/astutil/imports.go new file mode 100644 index 00000000000..5bacc0fa49e --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -0,0 +1,472 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package astutil contains common utilities for working with the Go AST. +package astutil // import "golang.org/x/tools/go/ast/astutil" + +import ( + "fmt" + "go/ast" + "go/token" + "slices" + "strconv" + "strings" +) + +// AddImport adds the import path to the file f, if absent. +func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) { + return AddNamedImport(fset, f, "", path) +} + +// AddNamedImport adds the import with the given name and path to the file f, if absent. +// If name is not empty, it is used to rename the import. +// +// For example, calling +// +// AddNamedImport(fset, f, "pathpkg", "path") +// +// adds +// +// import pathpkg "path" +func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) { + if imports(f, name, path) { + return false + } + + newImport := &ast.ImportSpec{ + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(path), + }, + } + if name != "" { + newImport.Name = &ast.Ident{Name: name} + } + + // Find an import decl to add to. + // The goal is to find an existing import + // whose import path has the longest shared + // prefix with path. + var ( + bestMatch = -1 // length of longest shared prefix + lastImport = -1 // index in f.Decls of the file's final import decl + impDecl *ast.GenDecl // import decl containing the best match + impIndex = -1 // spec index in impDecl containing the best match + + isThirdPartyPath = isThirdParty(path) + ) + for i, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if ok && gen.Tok == token.IMPORT { + lastImport = i + // Do not add to import "C", to avoid disrupting the + // association with its doc comment, breaking cgo. + if declImports(gen, "C") { + continue + } + + // Match an empty import decl if that's all that is available. + if len(gen.Specs) == 0 && bestMatch == -1 { + impDecl = gen + } + + // Compute longest shared prefix with imports in this group and find best + // matched import spec. + // 1. Always prefer import spec with longest shared prefix. + // 2. While match length is 0, + // - for stdlib package: prefer first import spec. + // - for third party package: prefer first third party import spec. 
+ // We cannot use last import spec as best match for third party package + // because grouped imports are usually placed last by goimports -local + // flag. + // See issue #19190. + seenAnyThirdParty := false + for j, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + p := importPath(impspec) + n := matchLen(p, path) + if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) { + bestMatch = n + impDecl = gen + impIndex = j + } + seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p) + } + } + } + + // If no import decl found, add one after the last import. + if impDecl == nil { + impDecl = &ast.GenDecl{ + Tok: token.IMPORT, + } + if lastImport >= 0 { + impDecl.TokPos = f.Decls[lastImport].End() + } else { + // There are no existing imports. + // Our new import, preceded by a blank line, goes after the package declaration + // and after the comment, if any, that starts on the same line as the + // package declaration. + impDecl.TokPos = f.Package + + file := fset.File(f.Package) + pkgLine := file.Line(f.Package) + for _, c := range f.Comments { + if file.Line(c.Pos()) > pkgLine { + break + } + // +2 for a blank line + impDecl.TokPos = c.End() + 2 + } + } + f.Decls = append(f.Decls, nil) + copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:]) + f.Decls[lastImport+1] = impDecl + } + + // Insert new import at insertAt. + insertAt := 0 + if impIndex >= 0 { + // insert after the found import + insertAt = impIndex + 1 + } + impDecl.Specs = append(impDecl.Specs, nil) + copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:]) + impDecl.Specs[insertAt] = newImport + pos := impDecl.Pos() + if insertAt > 0 { + // If there is a comment after an existing import, preserve the comment + // position by adding the new import after the comment. + if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil { + pos = spec.Comment.End() + } else { + // Assign same position as the previous import, + // so that the sorter sees it as being in the same block. + pos = impDecl.Specs[insertAt-1].Pos() + } + } + if newImport.Name != nil { + newImport.Name.NamePos = pos + } + newImport.Path.ValuePos = pos + newImport.EndPos = pos + + // Clean up parens. impDecl contains at least one spec. + if len(impDecl.Specs) == 1 { + // Remove unneeded parens. + impDecl.Lparen = token.NoPos + } else if !impDecl.Lparen.IsValid() { + // impDecl needs parens added. + impDecl.Lparen = impDecl.Specs[0].Pos() + } + + f.Imports = append(f.Imports, newImport) + + if len(f.Decls) <= 1 { + return true + } + + // Merge all the import declarations into the first one. + var first *ast.GenDecl + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { + continue + } + if first == nil { + first = gen + continue // Don't touch the first one. + } + // We now know there is more than one package in this import + // declaration. Ensure that it ends up parenthesized. + first.Lparen = first.Pos() + // Move the imports of the other import declaration to the first one. + for _, spec := range gen.Specs { + spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + first.Specs = append(first.Specs, spec) + } + f.Decls = slices.Delete(f.Decls, i, i+1) + i-- + } + + return true +} + +func isThirdParty(importPath string) bool { + // Third party package import path usually contains "." (".com", ".org", ...) + // This logic is taken from golang.org/x/tools/imports package. 
+ return strings.Contains(importPath, ".") +} + +// DeleteImport deletes the import path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. +func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) { + return DeleteNamedImport(fset, f, "", path) +} + +// DeleteNamedImport deletes the import with the given name and path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. +func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) { + var ( + delspecs = make(map[*ast.ImportSpec]bool) + delcomments = make(map[*ast.CommentGroup]bool) + ) + + // Find the import nodes that import path, if any. + for i := 0; i < len(f.Decls); i++ { + gen, ok := f.Decls[i].(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT { + continue + } + for j := 0; j < len(gen.Specs); j++ { + impspec := gen.Specs[j].(*ast.ImportSpec) + if importName(impspec) != name || importPath(impspec) != path { + continue + } + + // We found an import spec that imports path. + // Delete it. + delspecs[impspec] = true + deleted = true + gen.Specs = slices.Delete(gen.Specs, j, j+1) + + // If this was the last import spec in this decl, + // delete the decl, too. + if len(gen.Specs) == 0 { + f.Decls = slices.Delete(f.Decls, i, i+1) + i-- + break + } else if len(gen.Specs) == 1 { + if impspec.Doc != nil { + delcomments[impspec.Doc] = true + } + if impspec.Comment != nil { + delcomments[impspec.Comment] = true + } + for _, cg := range f.Comments { + // Found comment on the same line as the import spec. + if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line { + delcomments[cg] = true + break + } + } + + spec := gen.Specs[0].(*ast.ImportSpec) + + // Move the documentation right after the import decl. + if spec.Doc != nil { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + } + for _, cg := range f.Comments { + if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + break + } + } + } + if j > 0 { + lastImpspec := gen.Specs[j-1].(*ast.ImportSpec) + lastLine := fset.PositionFor(lastImpspec.Path.ValuePos, false).Line + line := fset.PositionFor(impspec.Path.ValuePos, false).Line + + // We deleted an entry but now there may be + // a blank line-sized hole where the import was. + if line-lastLine > 1 || !gen.Rparen.IsValid() { + // There was a blank line immediately preceding the deleted import, + // so there's no need to close the hole. The right parenthesis is + // invalid after AddImport to an import statement without parenthesis. + // Do nothing. + } else if line != fset.File(gen.Rparen).LineCount() { + // There was no blank line. Close the hole. + fset.File(gen.Rparen).MergeLine(line) + } + } + j-- + } + } + + // Delete imports from f.Imports. + before := len(f.Imports) + f.Imports = slices.DeleteFunc(f.Imports, func(imp *ast.ImportSpec) bool { + _, ok := delspecs[imp] + return ok + }) + if len(f.Imports)+len(delspecs) != before { + // This can happen when the AST is invalid (i.e. imports differ between f.Decls and f.Imports). + panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) + } + + // Delete comments from f.Comments. 
+ f.Comments = slices.DeleteFunc(f.Comments, func(cg *ast.CommentGroup) bool { + _, ok := delcomments[cg] + return ok + }) + + return +} + +// RewriteImport rewrites any import of path oldPath to path newPath. +func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) { + for _, imp := range f.Imports { + if importPath(imp) == oldPath { + rewrote = true + // record old End, because the default is to compute + // it using the length of imp.Path.Value. + imp.EndPos = imp.End() + imp.Path.Value = strconv.Quote(newPath) + } + } + return +} + +// UsesImport reports whether a given import is used. +// The provided File must have been parsed with syntactic object resolution +// (not using go/parser.SkipObjectResolution). +func UsesImport(f *ast.File, path string) (used bool) { + if f.Scope == nil { + panic("file f was not parsed with syntactic object resolution") + } + spec := importSpec(f, path) + if spec == nil { + return + } + + name := spec.Name.String() + switch name { + case "": + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + case "_", ".": + // Not sure if this import is used - err on the side of caution. + return true + } + + ast.Walk(visitFn(func(n ast.Node) { + sel, ok := n.(*ast.SelectorExpr) + if ok && isTopName(sel.X, name) { + used = true + } + }), f) + + return +} + +type visitFn func(node ast.Node) + +func (fn visitFn) Visit(node ast.Node) ast.Visitor { + fn(node) + return fn +} + +// imports reports whether f has an import with the specified name and path. +func imports(f *ast.File, name, path string) bool { + for _, s := range f.Imports { + if importName(s) == name && importPath(s) == path { + return true + } + } + return false +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if importPath(s) == path { + return s + } + } + return nil +} + +// importName returns the name of s, +// or "" if the import is not named. +func importName(s *ast.ImportSpec) string { + if s.Name == nil { + return "" + } + return s.Name.Name +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return t +} + +// declImports reports whether gen contains an import of path. +func declImports(gen *ast.GenDecl, path string) bool { + if gen.Tok != token.IMPORT { + return false + } + for _, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if importPath(impspec) == path { + return true + } + } + return false +} + +// matchLen returns the length of the longest path segment prefix shared by x and y. +func matchLen(x, y string) int { + n := 0 + for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ { + if x[i] == '/' { + n++ + } + } + return n +} + +// isTopName returns true if n is a top-level unresolved identifier with the given name. +func isTopName(n ast.Expr, name string) bool { + id, ok := n.(*ast.Ident) + return ok && id.Name == name && id.Obj == nil +} + +// Imports returns the file imports grouped by paragraph. 
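+// For example, given a file whose sole import declaration is
+//
+//	import (
+//		"fmt"
+//		"os"
+//
+//		"golang.org/x/tools/go/ast/astutil"
+//	)
+//
+// the blank line splits the specs into two groups: one holding "fmt"
+// and "os", and one holding the astutil import.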
+func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec { + var groups [][]*ast.ImportSpec + + for _, decl := range f.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok || genDecl.Tok != token.IMPORT { + break + } + + group := []*ast.ImportSpec{} + + var lastLine int + for _, spec := range genDecl.Specs { + importSpec := spec.(*ast.ImportSpec) + pos := importSpec.Path.ValuePos + line := fset.Position(pos).Line + if lastLine > 0 && pos > 0 && line-lastLine > 1 { + groups = append(groups, group) + group = []*ast.ImportSpec{} + } + group = append(group, importSpec) + lastLine = line + } + groups = append(groups, group) + } + + return groups +} diff --git a/src/cmd/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/src/cmd/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go new file mode 100644 index 00000000000..4ad0549304c --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -0,0 +1,490 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +import ( + "fmt" + "go/ast" + "reflect" + "sort" +) + +// An ApplyFunc is invoked by Apply for each node n, even if n is nil, +// before and/or after the node's children, using a Cursor describing +// the current node and providing operations on it. +// +// The return value of ApplyFunc controls the syntax tree traversal. +// See Apply for details. +type ApplyFunc func(*Cursor) bool + +// Apply traverses a syntax tree recursively, starting with root, +// and calling pre and post for each node as described below. +// Apply returns the syntax tree, possibly modified. +// +// If pre is not nil, it is called for each node before the node's +// children are traversed (pre-order). If pre returns false, no +// children are traversed, and post is not called for that node. +// +// If post is not nil, and a prior call of pre didn't return false, +// post is called for each node after its children are traversed +// (post-order). If post returns false, traversal is terminated and +// Apply returns immediately. +// +// Only fields that refer to AST nodes are considered children; +// i.e., token.Pos, Scopes, Objects, and fields of basic types +// (strings, etc.) are ignored. +// +// Children are traversed in the order in which they appear in the +// respective node's struct definition. A package's files are +// traversed in the filenames' alphabetical order. +func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) { + parent := &struct{ ast.Node }{root} + defer func() { + if r := recover(); r != nil && r != abort { + panic(r) + } + result = parent.Node + }() + a := &application{pre: pre, post: post} + a.apply(parent, "Node", nil, root) + return +} + +var abort = new(int) // singleton, to signal termination of Apply + +// A Cursor describes a node encountered during Apply. +// Information about the node and its parent is available +// from the Node, Parent, Name, and Index methods. +// +// If p is a variable of type and value of the current parent node +// c.Parent(), and f is the field identifier with name c.Name(), +// the following invariants hold: +// +// p.f == c.Node() if c.Index() < 0 +// p.f[c.Index()] == c.Node() if c.Index() >= 0 +// +// The methods Replace, Delete, InsertBefore, and InsertAfter +// can be used to change the AST without disrupting Apply. 
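+//
+// For example, a rewrite that replaces every identifier named "foo"
+// with one named "bar" (a hypothetical transformation; f is a parsed
+// *ast.File) can be written as:
+//
+//	astutil.Apply(f, nil, func(c *astutil.Cursor) bool {
+//		if id, ok := c.Node().(*ast.Ident); ok && id.Name == "foo" {
+//			c.Replace(&ast.Ident{Name: "bar", NamePos: id.NamePos})
+//		}
+//		return true
+//	})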
+// +// This type is not to be confused with [inspector.Cursor] from +// package [golang.org/x/tools/go/ast/inspector], which provides +// stateless navigation of immutable syntax trees. +type Cursor struct { + parent ast.Node + name string + iter *iterator // valid if non-nil + node ast.Node +} + +// Node returns the current Node. +func (c *Cursor) Node() ast.Node { return c.node } + +// Parent returns the parent of the current Node. +func (c *Cursor) Parent() ast.Node { return c.parent } + +// Name returns the name of the parent Node field that contains the current Node. +// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns +// the filename for the current Node. +func (c *Cursor) Name() string { return c.name } + +// Index reports the index >= 0 of the current Node in the slice of Nodes that +// contains it, or a value < 0 if the current Node is not part of a slice. +// The index of the current node changes if InsertBefore is called while +// processing the current node. +func (c *Cursor) Index() int { + if c.iter != nil { + return c.iter.index + } + return -1 +} + +// field returns the current node's parent field value. +func (c *Cursor) field() reflect.Value { + return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name) +} + +// Replace replaces the current Node with n. +// The replacement node is not walked by Apply. +func (c *Cursor) Replace(n ast.Node) { + if _, ok := c.node.(*ast.File); ok { + file, ok := n.(*ast.File) + if !ok { + panic("attempt to replace *ast.File with non-*ast.File") + } + c.parent.(*ast.Package).Files[c.name] = file + return + } + + v := c.field() + if i := c.Index(); i >= 0 { + v = v.Index(i) + } + v.Set(reflect.ValueOf(n)) +} + +// Delete deletes the current Node from its containing slice. +// If the current Node is not part of a slice, Delete panics. +// As a special case, if the current node is a package file, +// Delete removes it from the package's Files map. +func (c *Cursor) Delete() { + if _, ok := c.node.(*ast.File); ok { + delete(c.parent.(*ast.Package).Files, c.name) + return + } + + i := c.Index() + if i < 0 { + panic("Delete node not contained in slice") + } + v := c.field() + l := v.Len() + reflect.Copy(v.Slice(i, l), v.Slice(i+1, l)) + v.Index(l - 1).Set(reflect.Zero(v.Type().Elem())) + v.SetLen(l - 1) + c.iter.step-- +} + +// InsertAfter inserts n after the current Node in its containing slice. +// If the current Node is not part of a slice, InsertAfter panics. +// Apply does not walk n. +func (c *Cursor) InsertAfter(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertAfter node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l)) + v.Index(i + 1).Set(reflect.ValueOf(n)) + c.iter.step++ +} + +// InsertBefore inserts n before the current Node in its containing slice. +// If the current Node is not part of a slice, InsertBefore panics. +// Apply will not walk n. +func (c *Cursor) InsertBefore(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertBefore node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+1, l), v.Slice(i, l)) + v.Index(i).Set(reflect.ValueOf(n)) + c.iter.index++ +} + +// application carries all the shared data so we can pass it around cheaply. 
+type application struct { + pre, post ApplyFunc + cursor Cursor + iter iterator +} + +func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) { + // convert typed nil into untyped nil + if v := reflect.ValueOf(n); v.Kind() == reflect.Pointer && v.IsNil() { + n = nil + } + + // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead + saved := a.cursor + a.cursor.parent = parent + a.cursor.name = name + a.cursor.iter = iter + a.cursor.node = n + + if a.pre != nil && !a.pre(&a.cursor) { + a.cursor = saved + return + } + + // walk children + // (the order of the cases matches the order of the corresponding node types in go/ast) + switch n := n.(type) { + case nil: + // nothing to do + + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + if n != nil { + a.applyList(n, "List") + } + + case *ast.Field: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.FieldList: + a.applyList(n, "List") + + // Expressions + case *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.Ellipsis: + a.apply(n, "Elt", nil, n.Elt) + + case *ast.FuncLit: + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + case *ast.CompositeLit: + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Elts") + + case *ast.ParenExpr: + a.apply(n, "X", nil, n.X) + + case *ast.SelectorExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Sel", nil, n.Sel) + + case *ast.IndexExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Index", nil, n.Index) + + case *ast.IndexListExpr: + a.apply(n, "X", nil, n.X) + a.applyList(n, "Indices") + + case *ast.SliceExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Low", nil, n.Low) + a.apply(n, "High", nil, n.High) + a.apply(n, "Max", nil, n.Max) + + case *ast.TypeAssertExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Type", nil, n.Type) + + case *ast.CallExpr: + a.apply(n, "Fun", nil, n.Fun) + a.applyList(n, "Args") + + case *ast.StarExpr: + a.apply(n, "X", nil, n.X) + + case *ast.UnaryExpr: + a.apply(n, "X", nil, n.X) + + case *ast.BinaryExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Y", nil, n.Y) + + case *ast.KeyValueExpr: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + // Types + case *ast.ArrayType: + a.apply(n, "Len", nil, n.Len) + a.apply(n, "Elt", nil, n.Elt) + + case *ast.StructType: + a.apply(n, "Fields", nil, n.Fields) + + case *ast.FuncType: + if tparams := n.TypeParams; tparams != nil { + a.apply(n, "TypeParams", nil, tparams) + } + a.apply(n, "Params", nil, n.Params) + a.apply(n, "Results", nil, n.Results) + + case *ast.InterfaceType: + a.apply(n, "Methods", nil, n.Methods) + + case *ast.MapType: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + case *ast.ChanType: + a.apply(n, "Value", nil, n.Value) + + // Statements + case *ast.BadStmt: + // nothing to do + + case *ast.DeclStmt: + a.apply(n, "Decl", nil, n.Decl) + + case *ast.EmptyStmt: + // nothing to do + + case *ast.LabeledStmt: + a.apply(n, "Label", nil, n.Label) + a.apply(n, "Stmt", nil, n.Stmt) + + case *ast.ExprStmt: + a.apply(n, "X", nil, n.X) + + case *ast.SendStmt: + a.apply(n, "Chan", nil, n.Chan) + a.apply(n, "Value", nil, n.Value) + + case *ast.IncDecStmt: + a.apply(n, "X", nil, n.X) + + case *ast.AssignStmt: + a.applyList(n, "Lhs") + a.applyList(n, "Rhs") + + case *ast.GoStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.DeferStmt: + 
a.apply(n, "Call", nil, n.Call) + + case *ast.ReturnStmt: + a.applyList(n, "Results") + + case *ast.BranchStmt: + a.apply(n, "Label", nil, n.Label) + + case *ast.BlockStmt: + a.applyList(n, "List") + + case *ast.IfStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Body", nil, n.Body) + a.apply(n, "Else", nil, n.Else) + + case *ast.CaseClause: + a.applyList(n, "List") + a.applyList(n, "Body") + + case *ast.SwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Body", nil, n.Body) + + case *ast.TypeSwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Assign", nil, n.Assign) + a.apply(n, "Body", nil, n.Body) + + case *ast.CommClause: + a.apply(n, "Comm", nil, n.Comm) + a.applyList(n, "Body") + + case *ast.SelectStmt: + a.apply(n, "Body", nil, n.Body) + + case *ast.ForStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Post", nil, n.Post) + a.apply(n, "Body", nil, n.Body) + + case *ast.RangeStmt: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + a.apply(n, "X", nil, n.X) + a.apply(n, "Body", nil, n.Body) + + // Declarations + case *ast.ImportSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Path", nil, n.Path) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.ValueSpec: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Values") + a.apply(n, "Comment", nil, n.Comment) + + case *ast.TypeSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + if tparams := n.TypeParams; tparams != nil { + a.apply(n, "TypeParams", nil, tparams) + } + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.BadDecl: + // nothing to do + + case *ast.GenDecl: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Specs") + + case *ast.FuncDecl: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Recv", nil, n.Recv) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + // Files and packages + case *ast.File: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.applyList(n, "Decls") + // Don't walk n.Comments; they have either been walked already if + // they are Doc comments, or they can be easily walked explicitly. + + case *ast.Package: + // collect and sort names for reproducible behavior + var names []string + for name := range n.Files { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + a.apply(n, name, nil, n.Files[name]) + } + + default: + panic(fmt.Sprintf("Apply: unexpected node type %T", n)) + } + + if a.post != nil && !a.post(&a.cursor) { + panic(abort) + } + + a.cursor = saved +} + +// An iterator controls iteration over a slice of nodes. 
+type iterator struct { + index, step int +} + +func (a *application) applyList(parent ast.Node, name string) { + // avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead + saved := a.iter + a.iter.index = 0 + for { + // must reload parent.name each time, since cursor modifications might change it + v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name) + if a.iter.index >= v.Len() { + break + } + + // element x may be nil in a bad AST - be cautious + var x ast.Node + if e := v.Index(a.iter.index); e.IsValid() { + x = e.Interface().(ast.Node) + } + + a.iter.step = 1 + a.apply(parent, name, &a.iter, x) + a.iter.index += a.iter.step + } + a.iter = saved +} diff --git a/src/cmd/vendor/golang.org/x/tools/go/ast/astutil/util.go b/src/cmd/vendor/golang.org/x/tools/go/ast/astutil/util.go new file mode 100644 index 00000000000..c820b208499 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +import "go/ast" + +// Unparen returns e with any enclosing parentheses stripped. +// Deprecated: use [ast.Unparen]. +// +//go:fix inline +func Unparen(e ast.Expr) ast.Expr { return ast.Unparen(e) } diff --git a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go index cea89d34dac..970d7507f02 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go @@ -2,166 +2,39 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package analysisinternal provides gopls' internal analyses with a -// number of helper functions that operate on typed syntax trees. +// Package analysisinternal provides helper functions for use in both +// the analysis drivers in go/analysis and gopls, and in various +// analyzers. +// +// TODO(adonovan): this is not ideal as it may lead to unnecessary +// dependencies between drivers and analyzers. Split into analyzerlib +// and driverlib? package analysisinternal import ( - "bytes" "cmp" "fmt" - "go/ast" - "go/printer" - "go/scanner" "go/token" - "go/types" - "iter" - pathpkg "path" + "os" "slices" - "strings" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/moreiters" - "golang.org/x/tools/internal/typesinternal" ) -// Deprecated: this heuristic is ill-defined. -// TODO(adonovan): move to sole use in gopls/internal/cache. -func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos { - // Get the end position for the type error. - file := fset.File(start) - if file == nil { - return start +// ReadFile reads a file and adds it to the FileSet in pass +// so that we can report errors against it using lineStart. +func ReadFile(pass *analysis.Pass, filename string) ([]byte, *token.File, error) { + readFile := pass.ReadFile + if readFile == nil { + readFile = os.ReadFile } - if offset := file.PositionFor(start, false).Offset; offset > len(src) { - return start - } else { - src = src[offset:] + content, err := readFile(filename) + if err != nil { + return nil, nil, err } - - // Attempt to find a reasonable end position for the type error. - // - // TODO(rfindley): the heuristic implemented here is unclear. 
It looks like - // it seeks the end of the primary operand starting at start, but that is not - // quite implemented (for example, given a func literal this heuristic will - // return the range of the func keyword). - // - // We should formalize this heuristic, or deprecate it by finally proposing - // to add end position to all type checker errors. - // - // Nevertheless, ensure that the end position at least spans the current - // token at the cursor (this was golang/go#69505). - end := start - { - var s scanner.Scanner - fset := token.NewFileSet() - f := fset.AddFile("", fset.Base(), len(src)) - s.Init(f, src, nil /* no error handler */, scanner.ScanComments) - pos, tok, lit := s.Scan() - if tok != token.SEMICOLON && token.Pos(f.Base()) <= pos && pos <= token.Pos(f.Base()+f.Size()) { - off := file.Offset(pos) + len(lit) - src = src[off:] - end += token.Pos(off) - } - } - - // Look for bytes that might terminate the current operand. See note above: - // this is imprecise. - if width := bytes.IndexAny(src, " \n,():;[]+-*/"); width > 0 { - end += token.Pos(width) - } - return end -} - -// MatchingIdents finds the names of all identifiers in 'node' that match any of the given types. -// 'pos' represents the position at which the identifiers may be inserted. 'pos' must be within -// the scope of each of identifier we select. Otherwise, we will insert a variable at 'pos' that -// is unrecognized. -func MatchingIdents(typs []types.Type, node ast.Node, pos token.Pos, info *types.Info, pkg *types.Package) map[types.Type][]string { - - // Initialize matches to contain the variable types we are searching for. - matches := make(map[types.Type][]string) - for _, typ := range typs { - if typ == nil { - continue // TODO(adonovan): is this reachable? - } - matches[typ] = nil // create entry - } - - seen := map[types.Object]struct{}{} - ast.Inspect(node, func(n ast.Node) bool { - if n == nil { - return false - } - // Prevent circular definitions. If 'pos' is within an assignment statement, do not - // allow any identifiers in that assignment statement to be selected. Otherwise, - // we could do the following, where 'x' satisfies the type of 'f0': - // - // x := fakeStruct{f0: x} - // - if assign, ok := n.(*ast.AssignStmt); ok && pos > assign.Pos() && pos <= assign.End() { - return false - } - if n.End() > pos { - return n.Pos() <= pos - } - ident, ok := n.(*ast.Ident) - if !ok || ident.Name == "_" { - return true - } - obj := info.Defs[ident] - if obj == nil || obj.Type() == nil { - return true - } - if _, ok := obj.(*types.TypeName); ok { - return true - } - // Prevent duplicates in matches' values. - if _, ok = seen[obj]; ok { - return true - } - seen[obj] = struct{}{} - // Find the scope for the given position. Then, check whether the object - // exists within the scope. - innerScope := pkg.Scope().Innermost(pos) - if innerScope == nil { - return true - } - _, foundObj := innerScope.LookupParent(ident.Name, pos) - if foundObj != obj { - return true - } - // The object must match one of the types that we are searching for. - // TODO(adonovan): opt: use typeutil.Map? - if names, ok := matches[obj.Type()]; ok { - matches[obj.Type()] = append(names, ident.Name) - } else { - // If the object type does not exactly match - // any of the target types, greedily find the first - // target type that the object type can satisfy. 
- for typ := range matches { - if equivalentTypes(obj.Type(), typ) { - matches[typ] = append(matches[typ], ident.Name) - } - } - } - return true - }) - return matches -} - -func equivalentTypes(want, got types.Type) bool { - if types.Identical(want, got) { - return true - } - // Code segment to help check for untyped equality from (golang/go#32146). - if rhs, ok := want.(*types.Basic); ok && rhs.Info()&types.IsUntyped > 0 { - if lhs, ok := got.Underlying().(*types.Basic); ok { - return rhs.Info()&types.IsConstType == lhs.Info()&types.IsConstType - } - } - return types.AssignableTo(want, got) + tf := pass.Fset.AddFile(filename, -1, len(content)) + tf.SetLinesForContent(content) + return content, tf, nil } // A ReadFileFunc is a function that returns the @@ -193,207 +66,6 @@ func CheckReadable(pass *analysis.Pass, filename string) error { return fmt.Errorf("Pass.ReadFile: %s is not among OtherFiles, IgnoredFiles, or names of Files", filename) } -// AddImport checks whether this file already imports pkgpath and that -// the import is in scope at pos. If so, it returns the name under -// which it was imported and no edits. Otherwise, it adds a new import -// of pkgpath, using a name derived from the preferred name, and -// returns the chosen name, a prefix to be concatenated with member to -// form a qualified name, and the edit for the new import. -// -// The member argument indicates the name of the desired symbol within -// the imported package. This is needed in the case when the existing -// import is a dot import, because then it is possible that the -// desired symbol is shadowed by other declarations in the current -// package. If member is not shadowed at pos, AddImport returns (".", -// "", nil). (AddImport accepts the caller's implicit claim that the -// imported package declares member.) -// -// Use a preferredName of "_" to request a blank import; -// member is ignored in this case. -// -// It does not mutate its arguments. -func AddImport(info *types.Info, file *ast.File, preferredName, pkgpath, member string, pos token.Pos) (name, prefix string, newImport []analysis.TextEdit) { - // Find innermost enclosing lexical block. - scope := info.Scopes[file].Innermost(pos) - if scope == nil { - panic("no enclosing lexical block") - } - - // Is there an existing import of this package? - // If so, are we in its scope? (not shadowed) - for _, spec := range file.Imports { - pkgname := info.PkgNameOf(spec) - if pkgname != nil && pkgname.Imported().Path() == pkgpath { - name = pkgname.Name() - if preferredName == "_" { - // Request for blank import; any existing import will do. - return name, "", nil - } - if name == "." { - // The scope of ident must be the file scope. - if s, _ := scope.LookupParent(member, pos); s == info.Scopes[file] { - return name, "", nil - } - } else if _, obj := scope.LookupParent(name, pos); obj == pkgname { - return name, name + ".", nil - } - } - } - - // We must add a new import. - - // Ensure we have a fresh name. - newName := preferredName - if preferredName != "_" { - newName = FreshName(scope, pos, preferredName) - } - - // Create a new import declaration either before the first existing - // declaration (which must exist), including its comments; or - // inside the declaration, if it is an import group. - // - // Use a renaming import whenever the preferred name is not - // available, or the chosen name does not match the last - // segment of its path. 
- newText := fmt.Sprintf("%q", pkgpath) - if newName != preferredName || newName != pathpkg.Base(pkgpath) { - newText = fmt.Sprintf("%s %q", newName, pkgpath) - } - - decl0 := file.Decls[0] - var before ast.Node = decl0 - switch decl0 := decl0.(type) { - case *ast.GenDecl: - if decl0.Doc != nil { - before = decl0.Doc - } - case *ast.FuncDecl: - if decl0.Doc != nil { - before = decl0.Doc - } - } - if gd, ok := before.(*ast.GenDecl); ok && gd.Tok == token.IMPORT && gd.Rparen.IsValid() { - // Have existing grouped import ( ... ) decl. - if IsStdPackage(pkgpath) && len(gd.Specs) > 0 { - // Add spec for a std package before - // first existing spec, followed by - // a blank line if the next one is non-std. - first := gd.Specs[0].(*ast.ImportSpec) - pos = first.Pos() - if !IsStdPackage(first.Path.Value) { - newText += "\n" - } - newText += "\n\t" - } else { - // Add spec at end of group. - pos = gd.Rparen - newText = "\t" + newText + "\n" - } - } else { - // No import decl, or non-grouped import. - // Add a new import decl before first decl. - // (gofmt will merge multiple import decls.) - pos = before.Pos() - newText = "import " + newText + "\n\n" - } - return newName, newName + ".", []analysis.TextEdit{{ - Pos: pos, - End: pos, - NewText: []byte(newText), - }} -} - -// FreshName returns the name of an identifier that is undefined -// at the specified position, based on the preferred name. -func FreshName(scope *types.Scope, pos token.Pos, preferred string) string { - newName := preferred - for i := 0; ; i++ { - if _, obj := scope.LookupParent(newName, pos); obj == nil { - break // fresh - } - newName = fmt.Sprintf("%s%d", preferred, i) - } - return newName -} - -// Format returns a string representation of the node n. -func Format(fset *token.FileSet, n ast.Node) string { - var buf strings.Builder - printer.Fprint(&buf, fset, n) // ignore errors - return buf.String() -} - -// Imports returns true if path is imported by pkg. -func Imports(pkg *types.Package, path string) bool { - for _, imp := range pkg.Imports() { - if imp.Path() == path { - return true - } - } - return false -} - -// IsTypeNamed reports whether t is (or is an alias for) a -// package-level defined type with the given package path and one of -// the given names. It returns false if t is nil. -// -// This function avoids allocating the concatenation of "pkg.Name", -// which is important for the performance of syntax matching. -func IsTypeNamed(t types.Type, pkgPath string, names ...string) bool { - if named, ok := types.Unalias(t).(*types.Named); ok { - tname := named.Obj() - return tname != nil && - typesinternal.IsPackageLevel(tname) && - tname.Pkg().Path() == pkgPath && - slices.Contains(names, tname.Name()) - } - return false -} - -// IsPointerToNamed reports whether t is (or is an alias for) a pointer to a -// package-level defined type with the given package path and one of the given -// names. It returns false if t is not a pointer type. -func IsPointerToNamed(t types.Type, pkgPath string, names ...string) bool { - r := typesinternal.Unpointer(t) - if r == t { - return false - } - return IsTypeNamed(r, pkgPath, names...) -} - -// IsFunctionNamed reports whether obj is a package-level function -// defined in the given package and has one of the given names. -// It returns false if obj is nil. -// -// This function avoids allocating the concatenation of "pkg.Name", -// which is important for the performance of syntax matching. 
-func IsFunctionNamed(obj types.Object, pkgPath string, names ...string) bool { - f, ok := obj.(*types.Func) - return ok && - typesinternal.IsPackageLevel(obj) && - f.Pkg().Path() == pkgPath && - f.Type().(*types.Signature).Recv() == nil && - slices.Contains(names, f.Name()) -} - -// IsMethodNamed reports whether obj is a method defined on a -// package-level type with the given package and type name, and has -// one of the given names. It returns false if obj is nil. -// -// This function avoids allocating the concatenation of "pkg.TypeName.Name", -// which is important for the performance of syntax matching. -func IsMethodNamed(obj types.Object, pkgPath string, typeName string, names ...string) bool { - if fn, ok := obj.(*types.Func); ok { - if recv := fn.Type().(*types.Signature).Recv(); recv != nil { - _, T := typesinternal.ReceiverNamed(recv) - return T != nil && - IsTypeNamed(T, pkgPath, typeName) && - slices.Contains(names, fn.Name()) - } - } - return false -} - // ValidateFixes validates the set of fixes for a single diagnostic. // Any error indicates a bug in the originating analyzer. // @@ -496,172 +168,6 @@ func validateFix(fset *token.FileSet, fix *analysis.SuggestedFix) error { return nil } -// CanImport reports whether one package is allowed to import another. -// -// TODO(adonovan): allow customization of the accessibility relation -// (e.g. for Bazel). -func CanImport(from, to string) bool { - // TODO(adonovan): better segment hygiene. - if to == "internal" || strings.HasPrefix(to, "internal/") { - // Special case: only std packages may import internal/... - // We can't reliably know whether we're in std, so we - // use a heuristic on the first segment. - first, _, _ := strings.Cut(from, "/") - if strings.Contains(first, ".") { - return false // example.com/foo ∉ std - } - if first == "testdata" { - return false // testdata/foo ∉ std - } - } - if strings.HasSuffix(to, "/internal") { - return strings.HasPrefix(from, to[:len(to)-len("/internal")]) - } - if i := strings.LastIndex(to, "/internal/"); i >= 0 { - return strings.HasPrefix(from, to[:i]) - } - return true -} - -// DeleteStmt returns the edits to remove the [ast.Stmt] identified by -// curStmt, if it is contained within a BlockStmt, CaseClause, -// CommClause, or is the STMT in switch STMT; ... {...}. It returns nil otherwise. -func DeleteStmt(fset *token.FileSet, curStmt inspector.Cursor) []analysis.TextEdit { - stmt := curStmt.Node().(ast.Stmt) - // if the stmt is on a line by itself delete the whole line - // otherwise just delete the statement. - - // this logic would be a lot simpler with the file contents, and somewhat simpler - // if the cursors included the comments. - - tokFile := fset.File(stmt.Pos()) - lineOf := tokFile.Line - stmtStartLine, stmtEndLine := lineOf(stmt.Pos()), lineOf(stmt.End()) - - var from, to token.Pos - // bounds of adjacent syntax/comments on same line, if any - limits := func(left, right token.Pos) { - if lineOf(left) == stmtStartLine { - from = left - } - if lineOf(right) == stmtEndLine { - to = right - } - } - // TODO(pjw): there are other places a statement might be removed: - // IfStmt = "if" [ SimpleStmt ";" ] Expression Block [ "else" ( IfStmt | Block ) ] . - // (removing the blocks requires more rewriting than this routine would do) - // CommCase = "case" ( SendStmt | RecvStmt ) | "default" . 
- // (removing the stmt requires more rewriting, and it's unclear what the user means) - switch parent := curStmt.Parent().Node().(type) { - case *ast.SwitchStmt: - limits(parent.Switch, parent.Body.Lbrace) - case *ast.TypeSwitchStmt: - limits(parent.Switch, parent.Body.Lbrace) - if parent.Assign == stmt { - return nil // don't let the user break the type switch - } - case *ast.BlockStmt: - limits(parent.Lbrace, parent.Rbrace) - case *ast.CommClause: - limits(parent.Colon, curStmt.Parent().Parent().Node().(*ast.BlockStmt).Rbrace) - if parent.Comm == stmt { - return nil // maybe the user meant to remove the entire CommClause? - } - case *ast.CaseClause: - limits(parent.Colon, curStmt.Parent().Parent().Node().(*ast.BlockStmt).Rbrace) - case *ast.ForStmt: - limits(parent.For, parent.Body.Lbrace) - - default: - return nil // not one of ours - } - - if prev, found := curStmt.PrevSibling(); found && lineOf(prev.Node().End()) == stmtStartLine { - from = prev.Node().End() // preceding statement ends on same line - } - if next, found := curStmt.NextSibling(); found && lineOf(next.Node().Pos()) == stmtEndLine { - to = next.Node().Pos() // following statement begins on same line - } - // and now for the comments -Outer: - for _, cg := range enclosingFile(curStmt).Comments { - for _, co := range cg.List { - if lineOf(co.End()) < stmtStartLine { - continue - } else if lineOf(co.Pos()) > stmtEndLine { - break Outer // no more are possible - } - if lineOf(co.End()) == stmtStartLine && co.End() < stmt.Pos() { - if !from.IsValid() || co.End() > from { - from = co.End() - continue // maybe there are more - } - } - if lineOf(co.Pos()) == stmtEndLine && co.Pos() > stmt.End() { - if !to.IsValid() || co.Pos() < to { - to = co.Pos() - continue // maybe there are more - } - } - } - } - // if either from or to is valid, just remove the statement - // otherwise remove the line - edit := analysis.TextEdit{Pos: stmt.Pos(), End: stmt.End()} - if from.IsValid() || to.IsValid() { - // remove just the statement. - // we can't tell if there is a ; or whitespace right after the statement - // ideally we'd like to remove the former and leave the latter - // (if gofmt has run, there likely won't be a ;) - // In type switches we know there's a semicolon somewhere after the statement, - // but the extra work for this special case is not worth it, as gofmt will fix it. - return []analysis.TextEdit{edit} - } - // remove the whole line - for lineOf(edit.Pos) == stmtStartLine { - edit.Pos-- - } - edit.Pos++ // get back tostmtStartLine - for lineOf(edit.End) == stmtEndLine { - edit.End++ - } - return []analysis.TextEdit{edit} -} - -// Comments returns an iterator over the comments overlapping the specified interval. -func Comments(file *ast.File, start, end token.Pos) iter.Seq[*ast.Comment] { - // TODO(adonovan): optimize use binary O(log n) instead of linear O(n) search. - return func(yield func(*ast.Comment) bool) { - for _, cg := range file.Comments { - for _, co := range cg.List { - if co.Pos() > end { - return - } - if co.End() < start { - continue - } - - if !yield(co) { - return - } - } - } - } -} - -// IsStdPackage reports whether the specified package path belongs to a -// package in the standard library (including internal dependencies). -func IsStdPackage(path string) bool { - // A standard package has no dot in its first segment. - // (It may yet have a dot, e.g. "vendor/golang.org/x/foo".) 
- slash := strings.IndexByte(path, '/') - if slash < 0 { - slash = len(path) - } - return !strings.Contains(path[:slash], ".") && path != "testdata" -} - // Range returns an [analysis.Range] for the specified start and end positions. func Range(pos, end token.Pos) analysis.Range { return tokenRange{pos, end} @@ -672,9 +178,3 @@ type tokenRange struct{ StartPos, EndPos token.Pos } func (r tokenRange) Pos() token.Pos { return r.StartPos } func (r tokenRange) End() token.Pos { return r.EndPos } - -// enclosingFile returns the syntax tree for the file enclosing c. -func enclosingFile(c inspector.Cursor) *ast.File { - c, _ = moreiters.First(c.Enclosing((*ast.File)(nil))) - return c.Node().(*ast.File) -} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go b/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go index bfb5900f1b3..c6cdf5997e2 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go @@ -35,7 +35,7 @@ import ( // // var Analyzer = &analysis.Analyzer{ // Name: "halting", -// Doc: analysisutil.MustExtractDoc(doc, "halting"), +// Doc: analysisinternal.MustExtractDoc(doc, "halting"), // ... // } func MustExtractDoc(content, name string) string { diff --git a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/generated/generated.go b/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/generated/generated.go new file mode 100644 index 00000000000..13e1b69021a --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/generated/generated.go @@ -0,0 +1,41 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package generated defines an analyzer whose result makes it +// convenient to skip diagnostics within generated files. +package generated + +import ( + "go/ast" + "go/token" + "reflect" + + "golang.org/x/tools/go/analysis" +) + +var Analyzer = &analysis.Analyzer{ + Name: "generated", + Doc: "detect which Go files are generated", + URL: "https://pkg.go.dev/golang.org/x/tools/internal/analysisinternal/generated", + ResultType: reflect.TypeFor[*Result](), + Run: func(pass *analysis.Pass) (any, error) { + set := make(map[*token.File]bool) + for _, file := range pass.Files { + if ast.IsGenerated(file) { + set[pass.Fset.File(file.FileStart)] = true + } + } + return &Result{fset: pass.Fset, generatedFiles: set}, nil + }, +} + +type Result struct { + fset *token.FileSet + generatedFiles map[*token.File]bool +} + +// IsGenerated reports whether the position is within a generated file. +func (r *Result) IsGenerated(pos token.Pos) bool { + return r.generatedFiles[r.fset.File(pos)] +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/astutil/comment.go b/src/cmd/vendor/golang.org/x/tools/internal/astutil/comment.go index c3a256c987c..7e52aeaaac5 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/astutil/comment.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/astutil/comment.go @@ -7,6 +7,7 @@ package astutil import ( "go/ast" "go/token" + "iter" "strings" ) @@ -111,3 +112,24 @@ func Directives(g *ast.CommentGroup) (res []*Directive) { } return } + +// Comments returns an iterator over the comments overlapping the specified interval. 
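+//
+// A minimal sketch of its use, assuming a parsed *ast.File and
+// positions start and end within it (ranging over an iter.Seq
+// requires go1.23):
+//
+//	for c := range Comments(file, start, end) {
+//		fmt.Println(c.Text)
+//	}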
+func Comments(file *ast.File, start, end token.Pos) iter.Seq[*ast.Comment] { + // TODO(adonovan): optimize use binary O(log n) instead of linear O(n) search. + return func(yield func(*ast.Comment) bool) { + for _, cg := range file.Comments { + for _, co := range cg.List { + if co.Pos() > end { + return + } + if co.End() < start { + continue + } + + if !yield(co) { + return + } + } + } + } +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/astutil/equal.go b/src/cmd/vendor/golang.org/x/tools/internal/astutil/equal.go index c945de02d4a..210f392387b 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/astutil/equal.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/astutil/equal.go @@ -26,6 +26,14 @@ func Equal(x, y ast.Node, identical func(x, y *ast.Ident) bool) bool { return equal(reflect.ValueOf(x), reflect.ValueOf(y), identical) } +// EqualSyntax reports whether x and y are equal. +// Identifiers are considered equal if they are spelled the same. +// Comments are ignored. +func EqualSyntax(x, y ast.Expr) bool { + sameName := func(x, y *ast.Ident) bool { return x.Name == y.Name } + return Equal(x, y, sameName) +} + func equal(x, y reflect.Value, identical func(x, y *ast.Ident) bool) bool { // Ensure types are the same if x.Type() != y.Type() { diff --git a/src/cmd/vendor/golang.org/x/tools/internal/astutil/util.go b/src/cmd/vendor/golang.org/x/tools/internal/astutil/util.go index 14189155e4e..a1c09835041 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/astutil/util.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/astutil/util.go @@ -6,7 +6,13 @@ package astutil import ( "go/ast" + "go/printer" "go/token" + "strings" + + "golang.org/x/tools/go/ast/edge" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/moreiters" ) // PreorderStack traverses the tree rooted at root, @@ -67,3 +73,47 @@ func NodeContains(n ast.Node, pos token.Pos) bool { } return start <= pos && pos <= end } + +// IsChildOf reports whether cur.ParentEdge is ek. +// +// TODO(adonovan): promote to a method of Cursor. +func IsChildOf(cur inspector.Cursor, ek edge.Kind) bool { + got, _ := cur.ParentEdge() + return got == ek +} + +// EnclosingFile returns the syntax tree for the file enclosing c. +// +// TODO(adonovan): promote this to a method of Cursor. +func EnclosingFile(c inspector.Cursor) *ast.File { + c, _ = moreiters.First(c.Enclosing((*ast.File)(nil))) + return c.Node().(*ast.File) +} + +// DocComment returns the doc comment for a node, if any. +func DocComment(n ast.Node) *ast.CommentGroup { + switch n := n.(type) { + case *ast.FuncDecl: + return n.Doc + case *ast.GenDecl: + return n.Doc + case *ast.ValueSpec: + return n.Doc + case *ast.TypeSpec: + return n.Doc + case *ast.File: + return n.Doc + case *ast.ImportSpec: + return n.Doc + case *ast.Field: + return n.Doc + } + return nil +} + +// Format returns a string representation of the node n. 
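+// For example, given an *ast.BinaryExpr for a+b it returns "a + b".
+// Errors from the printer are ignored, so output for damaged nodes
+// may be incomplete.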
+func Format(fset *token.FileSet, n ast.Node) string { + var buf strings.Builder + printer.Fprint(&buf, fset, n) // ignore errors + return buf.String() +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/old.go b/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/old.go index 4c346706a75..7b7c5cc677b 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/old.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/old.go @@ -378,10 +378,7 @@ func (e *editGraph) twoDone(df, db int) (int, bool) { return 0, false // diagonals cannot overlap } kmin := max(-df, -db+e.delta) - kmax := db + e.delta - if df < kmax { - kmax = df - } + kmax := min(df, db+e.delta) for k := kmin; k <= kmax; k += 2 { x := e.vf.get(df, k) u := e.vb.get(db, k-e.delta) diff --git a/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go b/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go index 2d72d263043..811bb216ea2 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go @@ -103,11 +103,3 @@ func commonSuffixLenString(a, b string) int { } return i } - -func min(x, y int) int { - if x < y { - return x - } else { - return y - } -} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/goplsexport/export.go b/src/cmd/vendor/golang.org/x/tools/internal/goplsexport/export.go new file mode 100644 index 00000000000..2764a97fc7f --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/goplsexport/export.go @@ -0,0 +1,15 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package goplsexport provides various backdoors to not-yet-published +// parts of x/tools that are needed by gopls. +package goplsexport + +import "golang.org/x/tools/go/analysis" + +var ( + ErrorsAsTypeModernizer *analysis.Analyzer // = modernize.errorsastypeAnalyzer + StdIteratorsModernizer *analysis.Analyzer // = modernize.stditeratorsAnalyzer + PlusBuildModernizer *analysis.Analyzer // = modernize.plusbuildAnalyzer +) diff --git a/src/cmd/vendor/golang.org/x/tools/internal/packagepath/packagepath.go b/src/cmd/vendor/golang.org/x/tools/internal/packagepath/packagepath.go new file mode 100644 index 00000000000..fa39a13f9ea --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/packagepath/packagepath.go @@ -0,0 +1,49 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packagepath provides metadata operations on package path +// strings. +package packagepath + +// (This package should not depend on go/ast.) +import "strings" + +// CanImport reports whether one package is allowed to import another. +// +// TODO(adonovan): allow customization of the accessibility relation +// (e.g. for Bazel). +func CanImport(from, to string) bool { + // TODO(adonovan): better segment hygiene. + if to == "internal" || strings.HasPrefix(to, "internal/") { + // Special case: only std packages may import internal/... + // We can't reliably know whether we're in std, so we + // use a heuristic on the first segment. 
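+ // E.g. "fmt" (no dot in its first segment) is expected to be
+ // allowed to import "internal/foo", but "example.com/a" is not.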
+ first, _, _ := strings.Cut(from, "/") + if strings.Contains(first, ".") { + return false // example.com/foo ∉ std + } + if first == "testdata" { + return false // testdata/foo ∉ std + } + } + if strings.HasSuffix(to, "/internal") { + return strings.HasPrefix(from, to[:len(to)-len("/internal")]) + } + if i := strings.LastIndex(to, "/internal/"); i >= 0 { + return strings.HasPrefix(from, to[:i]) + } + return true +} + +// IsStdPackage reports whether the specified package path belongs to a +// package in the standard library (including internal dependencies). +func IsStdPackage(path string) bool { + // A standard package has no dot in its first segment. + // (It may yet have a dot, e.g. "vendor/golang.org/x/foo".) + slash := strings.IndexByte(path, '/') + if slash < 0 { + slash = len(path) + } + return !strings.Contains(path[:slash], ".") && path != "testdata" +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/refactor/delete.go b/src/cmd/vendor/golang.org/x/tools/internal/refactor/delete.go new file mode 100644 index 00000000000..6df01d8ef9c --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/refactor/delete.go @@ -0,0 +1,484 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package refactor + +// This file defines operations for computing deletion edits. + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "slices" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/edge" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/typesinternal/typeindex" +) + +// DeleteVar returns edits to delete the declaration of a variable or +// constant whose defining identifier is curId. +// +// It handles variants including: +// - GenDecl > ValueSpec versus AssignStmt; +// - RHS expression has effects, or not; +// - entire statement/declaration may be eliminated; +// and removes associated comments. +// +// If it cannot make the necessary edits, such as for a function +// parameter or result, it returns nil. +func DeleteVar(tokFile *token.File, info *types.Info, curId inspector.Cursor) []analysis.TextEdit { + switch ek, _ := curId.ParentEdge(); ek { + case edge.ValueSpec_Names: + return deleteVarFromValueSpec(tokFile, info, curId) + + case edge.AssignStmt_Lhs: + return deleteVarFromAssignStmt(tokFile, info, curId) + } + + // e.g. function receiver, parameter, or result, + // or "switch v := expr.(T) {}" (which has no object). + return nil +} + +// deleteVarFromValueSpec returns edits to delete the declaration of a +// variable or constant within a ValueSpec. +// +// Precondition: curId is Ident beneath ValueSpec.Names beneath GenDecl. +// +// See also [deleteVarFromAssignStmt], which has parallel structure. +func deleteVarFromValueSpec(tokFile *token.File, info *types.Info, curIdent inspector.Cursor) []analysis.TextEdit { + var ( + id = curIdent.Node().(*ast.Ident) + curSpec = curIdent.Parent() + spec = curSpec.Node().(*ast.ValueSpec) + ) + + declaresOtherNames := slices.ContainsFunc(spec.Names, func(name *ast.Ident) bool { + return name != id && name.Name != "_" + }) + noRHSEffects := !slices.ContainsFunc(spec.Values, func(rhs ast.Expr) bool { + return !typesinternal.NoEffects(info, rhs) + }) + if !declaresOtherNames && noRHSEffects { + // The spec is no longer needed, either to declare + // other variables, or for its side effects. 
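+ // E.g. for "var x = 1", deleting x removes the whole spec (and,
+ // since it is the sole spec, DeleteSpec removes the whole decl).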
+ return DeleteSpec(tokFile, curSpec)
+ }
+
+ // The spec is still needed, either for
+ // at least one LHS, or for effects on RHS.
+ // Blank out or delete just one LHS.
+
+ _, index := curIdent.ParentEdge() // index of LHS within ValueSpec.Names
+
+ // If there is no RHS, we can delete the LHS.
+ if len(spec.Values) == 0 {
+ var pos, end token.Pos
+ if index == len(spec.Names)-1 {
+ // Delete final name.
+ //
+ // var _, lhs1 T
+ // ------
+ pos = spec.Names[index-1].End()
+ end = spec.Names[index].End()
+ } else {
+ // Delete non-final name.
+ //
+ // var lhs0, _ T
+ // ------
+ pos = spec.Names[index].Pos()
+ end = spec.Names[index+1].Pos()
+ }
+ return []analysis.TextEdit{{
+ Pos: pos,
+ End: end,
+ }}
+ }
+
+ // If the assignment is n:n and the RHS has no effects,
+ // we can delete the LHS and its corresponding RHS.
+ if len(spec.Names) == len(spec.Values) &&
+ typesinternal.NoEffects(info, spec.Values[index]) {
+
+ if index == len(spec.Names)-1 {
+ // Delete final items.
+ //
+ // var _, lhs1 = rhs0, rhs1
+ // ------ ------
+ return []analysis.TextEdit{
+ {
+ Pos: spec.Names[index-1].End(),
+ End: spec.Names[index].End(),
+ },
+ {
+ Pos: spec.Values[index-1].End(),
+ End: spec.Values[index].End(),
+ },
+ }
+ } else {
+ // Delete non-final items.
+ //
+ // var lhs0, _ = rhs0, rhs1
+ // ------ ------
+ return []analysis.TextEdit{
+ {
+ Pos: spec.Names[index].Pos(),
+ End: spec.Names[index+1].Pos(),
+ },
+ {
+ Pos: spec.Values[index].Pos(),
+ End: spec.Values[index+1].Pos(),
+ },
+ }
+ }
+ }
+
+ // We cannot delete the RHS.
+ // Blank out the LHS.
+ return []analysis.TextEdit{{
+ Pos: id.Pos(),
+ End: id.End(),
+ NewText: []byte("_"),
+ }}
+}
+
+// Precondition: curId is Ident beneath AssignStmt.Lhs.
+//
+// See also [deleteVarFromValueSpec], which has parallel structure.
+func deleteVarFromAssignStmt(tokFile *token.File, info *types.Info, curIdent inspector.Cursor) []analysis.TextEdit {
+ var (
+ id = curIdent.Node().(*ast.Ident)
+ curStmt = curIdent.Parent()
+ assign = curStmt.Node().(*ast.AssignStmt)
+ )
+
+ declaresOtherNames := slices.ContainsFunc(assign.Lhs, func(lhs ast.Expr) bool {
+ lhsId, ok := lhs.(*ast.Ident)
+ return ok && lhsId != id && lhsId.Name != "_"
+ })
+ noRHSEffects := !slices.ContainsFunc(assign.Rhs, func(rhs ast.Expr) bool {
+ return !typesinternal.NoEffects(info, rhs)
+ })
+ if !declaresOtherNames && noRHSEffects {
+ // The assignment is no longer needed, either to
+ // declare other variables, or for its side effects.
+ if edits := DeleteStmt(tokFile, curStmt); edits != nil {
+ return edits
+ }
+ // Statement could not be deleted in this context.
+ // Fall back to conservative deletion.
+ }
+
+ // The assign is still needed, either for
+ // at least one LHS, or for effects on RHS,
+ // or because it cannot be deleted in its context.
+ // Blank out or delete just one LHS.
+
+ // If the assignment is 1:1 and the RHS has no effects,
+ // we can delete the LHS and its corresponding RHS.
+ _, index := curIdent.ParentEdge()
+ if len(assign.Lhs) > 1 &&
+ len(assign.Lhs) == len(assign.Rhs) &&
+ typesinternal.NoEffects(info, assign.Rhs[index]) {
+
+ if index == len(assign.Lhs)-1 {
+ // Delete final items.
+ //
+ // _, lhs1 := rhs0, rhs1
+ // ------ ------
+ return []analysis.TextEdit{
+ {
+ Pos: assign.Lhs[index-1].End(),
+ End: assign.Lhs[index].End(),
+ },
+ {
+ Pos: assign.Rhs[index-1].End(),
+ End: assign.Rhs[index].End(),
+ },
+ }
+ } else {
+ // Delete non-final items.
+ // + // lhs0, _ := rhs0, rhs1 + // ------ ------ + return []analysis.TextEdit{ + { + Pos: assign.Lhs[index].Pos(), + End: assign.Lhs[index+1].Pos(), + }, + { + Pos: assign.Rhs[index].Pos(), + End: assign.Rhs[index+1].Pos(), + }, + } + } + } + + // We cannot delete the RHS. + // Blank out the LHS. + edits := []analysis.TextEdit{{ + Pos: id.Pos(), + End: id.End(), + NewText: []byte("_"), + }} + + // If this eliminates the final variable declared by + // an := statement, we need to turn it into an = + // assignment to avoid a "no new variables on left + // side of :=" error. + if !declaresOtherNames { + edits = append(edits, analysis.TextEdit{ + Pos: assign.TokPos, + End: assign.TokPos + token.Pos(len(":=")), + NewText: []byte("="), + }) + } + + return edits +} + +// DeleteSpec returns edits to delete the {Type,Value}Spec identified by curSpec. +// +// TODO(adonovan): add test suite. Test for consts as well. +func DeleteSpec(tokFile *token.File, curSpec inspector.Cursor) []analysis.TextEdit { + var ( + spec = curSpec.Node().(ast.Spec) + curDecl = curSpec.Parent() + decl = curDecl.Node().(*ast.GenDecl) + ) + + // If it is the sole spec in the decl, + // delete the entire decl. + if len(decl.Specs) == 1 { + return DeleteDecl(tokFile, curDecl) + } + + // Delete the spec and its comments. + _, index := curSpec.ParentEdge() // index of ValueSpec within GenDecl.Specs + pos, end := spec.Pos(), spec.End() + if doc := astutil.DocComment(spec); doc != nil { + pos = doc.Pos() // leading comment + } + if index == len(decl.Specs)-1 { + // Delete final spec. + if c := eolComment(spec); c != nil { + // var (v int // comment \n) + end = c.End() + } + } else { + // Delete non-final spec. + // var ( a T; b T ) + // ----- + end = decl.Specs[index+1].Pos() + } + return []analysis.TextEdit{{ + Pos: pos, + End: end, + }} +} + +// DeleteDecl returns edits to delete the ast.Decl identified by curDecl. +// +// TODO(adonovan): add test suite. +func DeleteDecl(tokFile *token.File, curDecl inspector.Cursor) []analysis.TextEdit { + decl := curDecl.Node().(ast.Decl) + + ek, _ := curDecl.ParentEdge() + switch ek { + case edge.DeclStmt_Decl: + return DeleteStmt(tokFile, curDecl.Parent()) + + case edge.File_Decls: + pos, end := decl.Pos(), decl.End() + if doc := astutil.DocComment(decl); doc != nil { + pos = doc.Pos() + } + + // Delete free-floating comments on same line as rparen. + // var (...) // comment + var ( + file = curDecl.Parent().Node().(*ast.File) + lineOf = tokFile.Line + declEndLine = lineOf(decl.End()) + ) + for _, cg := range file.Comments { + for _, c := range cg.List { + if c.Pos() < end { + continue // too early + } + commentEndLine := lineOf(c.End()) + if commentEndLine > declEndLine { + break // too late + } else if lineOf(c.Pos()) == declEndLine && commentEndLine == declEndLine { + end = c.End() + } + } + } + + return []analysis.TextEdit{{ + Pos: pos, + End: end, + }} + + default: + panic(fmt.Sprintf("Decl parent is %v, want DeclStmt or File", ek)) + } +} + +// DeleteStmt returns the edits to remove the [ast.Stmt] identified by +// curStmt, if it is contained within a BlockStmt, CaseClause, +// CommClause, or is the STMT in switch STMT; ... {...}. It returns nil otherwise. +func DeleteStmt(tokFile *token.File, curStmt inspector.Cursor) []analysis.TextEdit { + stmt := curStmt.Node().(ast.Stmt) + // if the stmt is on a line by itself delete the whole line + // otherwise just delete the statement. 
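+ // E.g. in "f(); x++", deleting x++ removes just the statement
+ // text, whereas an x++ alone on its line is removed line and all.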
+ + // this logic would be a lot simpler with the file contents, and somewhat simpler + // if the cursors included the comments. + + lineOf := tokFile.Line + stmtStartLine, stmtEndLine := lineOf(stmt.Pos()), lineOf(stmt.End()) + + var from, to token.Pos + // bounds of adjacent syntax/comments on same line, if any + limits := func(left, right token.Pos) { + if lineOf(left) == stmtStartLine { + from = left + } + if lineOf(right) == stmtEndLine { + to = right + } + } + // TODO(pjw): there are other places a statement might be removed: + // IfStmt = "if" [ SimpleStmt ";" ] Expression Block [ "else" ( IfStmt | Block ) ] . + // (removing the blocks requires more rewriting than this routine would do) + // CommCase = "case" ( SendStmt | RecvStmt ) | "default" . + // (removing the stmt requires more rewriting, and it's unclear what the user means) + switch parent := curStmt.Parent().Node().(type) { + case *ast.SwitchStmt: + limits(parent.Switch, parent.Body.Lbrace) + case *ast.TypeSwitchStmt: + limits(parent.Switch, parent.Body.Lbrace) + if parent.Assign == stmt { + return nil // don't let the user break the type switch + } + case *ast.BlockStmt: + limits(parent.Lbrace, parent.Rbrace) + case *ast.CommClause: + limits(parent.Colon, curStmt.Parent().Parent().Node().(*ast.BlockStmt).Rbrace) + if parent.Comm == stmt { + return nil // maybe the user meant to remove the entire CommClause? + } + case *ast.CaseClause: + limits(parent.Colon, curStmt.Parent().Parent().Node().(*ast.BlockStmt).Rbrace) + case *ast.ForStmt: + limits(parent.For, parent.Body.Lbrace) + + default: + return nil // not one of ours + } + + if prev, found := curStmt.PrevSibling(); found && lineOf(prev.Node().End()) == stmtStartLine { + from = prev.Node().End() // preceding statement ends on same line + } + if next, found := curStmt.NextSibling(); found && lineOf(next.Node().Pos()) == stmtEndLine { + to = next.Node().Pos() // following statement begins on same line + } + // and now for the comments +Outer: + for _, cg := range astutil.EnclosingFile(curStmt).Comments { + for _, co := range cg.List { + if lineOf(co.End()) < stmtStartLine { + continue + } else if lineOf(co.Pos()) > stmtEndLine { + break Outer // no more are possible + } + if lineOf(co.End()) == stmtStartLine && co.End() < stmt.Pos() { + if !from.IsValid() || co.End() > from { + from = co.End() + continue // maybe there are more + } + } + if lineOf(co.Pos()) == stmtEndLine && co.Pos() > stmt.End() { + if !to.IsValid() || co.Pos() < to { + to = co.Pos() + continue // maybe there are more + } + } + } + } + // if either from or to is valid, just remove the statement + // otherwise remove the line + edit := analysis.TextEdit{Pos: stmt.Pos(), End: stmt.End()} + if from.IsValid() || to.IsValid() { + // remove just the statement. + // we can't tell if there is a ; or whitespace right after the statement + // ideally we'd like to remove the former and leave the latter + // (if gofmt has run, there likely won't be a ;) + // In type switches we know there's a semicolon somewhere after the statement, + // but the extra work for this special case is not worth it, as gofmt will fix it. 
return []analysis.TextEdit{edit}
+ }
+ // remove the whole line
+ for lineOf(edit.Pos) == stmtStartLine {
+ edit.Pos--
+ }
+ edit.Pos++ // get back to stmtStartLine
+ for lineOf(edit.End) == stmtEndLine {
+ edit.End++
+ }
+ return []analysis.TextEdit{edit}
+}
+
+// DeleteUnusedVars computes the edits required to delete the
+// declarations of any local variables whose last uses are in the
+// curDelend subtree, which is about to be deleted.
+func DeleteUnusedVars(index *typeindex.Index, info *types.Info, tokFile *token.File, curDelend inspector.Cursor) []analysis.TextEdit {
+ // TODO(adonovan): we might want to generalize this by
+ // splitting the two phases below, so that we can gather
+ // across a whole sequence of deletions then finally compute the
+ // set of variables that are no longer wanted.
+
+ // Count number of deletions of each var.
+ delcount := make(map[*types.Var]int)
+ for curId := range curDelend.Preorder((*ast.Ident)(nil)) {
+ id := curId.Node().(*ast.Ident)
+ if v, ok := info.Uses[id].(*types.Var); ok &&
+ typesinternal.GetVarKind(v) == typesinternal.LocalVar { // always false before go1.25
+ delcount[v]++
+ }
+ }
+
+ // Delete declaration of each var that became unused.
+ var edits []analysis.TextEdit
+ for v, count := range delcount {
+ if len(slices.Collect(index.Uses(v))) == count {
+ if curDefId, ok := index.Def(v); ok {
+ edits = append(edits, DeleteVar(tokFile, info, curDefId)...)
+ }
+ }
+ }
+ return edits
+}
+
+func eolComment(n ast.Node) *ast.CommentGroup {
+ // TODO(adonovan): support:
+ // func f() {...} // comment
+ switch n := n.(type) {
+ case *ast.GenDecl:
+ if !n.TokPos.IsValid() && len(n.Specs) == 1 {
+ return eolComment(n.Specs[0])
+ }
+ case *ast.ValueSpec:
+ return n.Comment
+ case *ast.TypeSpec:
+ return n.Comment
+ }
+ return nil
+}
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/refactor/imports.go b/src/cmd/vendor/golang.org/x/tools/internal/refactor/imports.go
new file mode 100644
index 00000000000..b5440d896b9
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/internal/refactor/imports.go
@@ -0,0 +1,127 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package refactor
+
+// This file defines operations for computing edits to imports.
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ pathpkg "path"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/internal/packagepath"
+)
+
+// AddImport returns the prefix (either "pkg." or "") that should be
+// used to qualify references to the desired symbol (member) imported
+// from the specified package, plus any necessary edits to the file's
+// import declaration to add a new import.
+//
+// If the import already exists, and is accessible at pos, AddImport
+// returns a prefix based on the existing name, and no edits. (If the
+// existing import is a dot import, the prefix is "".)
+//
+// Otherwise, it adds a new import, using a local name derived from
+// the preferred name. To request a blank import, use a preferredName
+// of "_", and discard the prefix result; member is ignored in this
+// case.
+//
+// AddImport accepts the caller's implicit claim that the imported
+// package declares member.
+//
+// AddImport does not mutate its arguments.
+func AddImport(info *types.Info, file *ast.File, preferredName, pkgpath, member string, pos token.Pos) (prefix string, edits []analysis.TextEdit) {
+ // Find innermost enclosing lexical block.
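+ // (E.g. a local declaration "fmt := 1" in scope at pos shadows an
+ // existing import of "fmt", so a fresh renamed import such as
+ // fmt0 "fmt" must be added instead.)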
+ scope := info.Scopes[file].Innermost(pos) + if scope == nil { + panic("no enclosing lexical block") + } + + // Is there an existing import of this package? + // If so, are we in its scope? (not shadowed) + for _, spec := range file.Imports { + pkgname := info.PkgNameOf(spec) + if pkgname != nil && pkgname.Imported().Path() == pkgpath { + name := pkgname.Name() + if preferredName == "_" { + // Request for blank import; any existing import will do. + return "", nil + } + if name == "." { + // The scope of ident must be the file scope. + if s, _ := scope.LookupParent(member, pos); s == info.Scopes[file] { + return "", nil + } + } else if _, obj := scope.LookupParent(name, pos); obj == pkgname { + return name + ".", nil + } + } + } + + // We must add a new import. + + // Ensure we have a fresh name. + newName := preferredName + if preferredName != "_" { + newName = FreshName(scope, pos, preferredName) + } + + // Create a new import declaration either before the first existing + // declaration (which must exist), including its comments; or + // inside the declaration, if it is an import group. + // + // Use a renaming import whenever the preferred name is not + // available, or the chosen name does not match the last + // segment of its path. + newText := fmt.Sprintf("%q", pkgpath) + if newName != preferredName || newName != pathpkg.Base(pkgpath) { + newText = fmt.Sprintf("%s %q", newName, pkgpath) + } + + decl0 := file.Decls[0] + var before ast.Node = decl0 + switch decl0 := decl0.(type) { + case *ast.GenDecl: + if decl0.Doc != nil { + before = decl0.Doc + } + case *ast.FuncDecl: + if decl0.Doc != nil { + before = decl0.Doc + } + } + if gd, ok := before.(*ast.GenDecl); ok && gd.Tok == token.IMPORT && gd.Rparen.IsValid() { + // Have existing grouped import ( ... ) decl. + if packagepath.IsStdPackage(pkgpath) && len(gd.Specs) > 0 { + // Add spec for a std package before + // first existing spec, followed by + // a blank line if the next one is non-std. + first := gd.Specs[0].(*ast.ImportSpec) + pos = first.Pos() + if !packagepath.IsStdPackage(first.Path.Value) { + newText += "\n" + } + newText += "\n\t" + } else { + // Add spec at end of group. + pos = gd.Rparen + newText = "\t" + newText + "\n" + } + } else { + // No import decl, or non-grouped import. + // Add a new import decl before first decl. + // (gofmt will merge multiple import decls.) + pos = before.Pos() + newText = "import " + newText + "\n\n" + } + return newName + ".", []analysis.TextEdit{{ + Pos: pos, + End: pos, + NewText: []byte(newText), + }} +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/callee.go b/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/callee.go new file mode 100644 index 00000000000..b46340c66a8 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/callee.go @@ -0,0 +1,867 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inline + +// This file defines the analysis of the callee function. + +import ( + "bytes" + "encoding/gob" + "fmt" + "go/ast" + "go/parser" + "go/token" + "go/types" + "slices" + "strings" + + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/typesinternal" +) + +// A Callee holds information about an inlinable function. Gob-serializable. 
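+// Serializability allows a driver to record the result of
+// AnalyzeCallee as an analysis fact in one package and reuse it
+// later when inlining calls found in other packages.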
+type Callee struct { + impl gobCallee +} + +func (callee *Callee) String() string { return callee.impl.Name } + +type gobCallee struct { + Content []byte // file content, compacted to a single func decl + + // results of type analysis (does not reach go/types data structures) + PkgPath string // package path of declaring package + Name string // user-friendly name for error messages + Unexported []string // names of free objects that are unexported + FreeRefs []freeRef // locations of references to free objects + FreeObjs []object // descriptions of free objects + ValidForCallStmt bool // function body is "return expr" where expr is f() or <-ch + NumResults int // number of results (according to type, not ast.FieldList) + Params []*paramInfo // information about parameters (incl. receiver) + TypeParams []*paramInfo // information about type parameters + Results []*paramInfo // information about result variables + Effects []int // order in which parameters are evaluated (see calleefx) + HasDefer bool // uses defer + HasBareReturn bool // uses bare return in non-void function + Returns [][]returnOperandFlags // metadata about result expressions for each return + Labels []string // names of all control labels + Falcon falconResult // falcon constraint system +} + +// returnOperandFlags records metadata about a single result expression in a return +// statement. +type returnOperandFlags int + +const ( + nonTrivialResult returnOperandFlags = 1 << iota // return operand has non-trivial conversion to result type + untypedNilResult // return operand is nil literal +) + +// A freeRef records a reference to a free object. Gob-serializable. +// (This means free relative to the FuncDecl as a whole, i.e. excluding parameters.) +type freeRef struct { + Offset int // byte offset of the reference relative to the FuncDecl + Object int // index into Callee.freeObjs +} + +// An object abstracts a free types.Object referenced by the callee. Gob-serializable. +type object struct { + Name string // Object.Name() + Kind string // one of {var,func,const,type,pkgname,nil,builtin} + PkgPath string // path of object's package (or imported package if kind="pkgname") + PkgName string // name of object's package (or imported package if kind="pkgname") + // TODO(rfindley): should we also track LocalPkgName here? Do we want to + // preserve the local package name? + ValidPos bool // Object.Pos().IsValid() + Shadow shadowMap // shadowing info for the object's refs +} + +// AnalyzeCallee analyzes a function that is a candidate for inlining +// and returns a Callee that describes it. The Callee object, which is +// serializable, can be passed to one or more subsequent calls to +// Inline, each with a different Caller. +// +// This design allows separate analysis of callers and callees in the +// golang.org/x/tools/go/analysis framework: the inlining information +// about a callee can be recorded as a "fact". +// +// The content should be the actual input to the compiler, not the +// apparent source file according to any //line directives that +// may be present within it. +func AnalyzeCallee(logf func(string, ...any), fset *token.FileSet, pkg *types.Package, info *types.Info, decl *ast.FuncDecl, content []byte) (*Callee, error) { + checkInfoFields(info) + + // The client is expected to have determined that the callee + // is a function with a declaration (not a built-in or var). 
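+ // (Otherwise the type assertions below would panic.)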
+ fn := info.Defs[decl.Name].(*types.Func) + sig := fn.Type().(*types.Signature) + + logf("analyzeCallee %v @ %v", fn, fset.PositionFor(decl.Pos(), false)) + + // Create user-friendly name ("pkg.Func" or "(pkg.T).Method") + var name string + if sig.Recv() == nil { + name = fmt.Sprintf("%s.%s", fn.Pkg().Name(), fn.Name()) + } else { + name = fmt.Sprintf("(%s).%s", types.TypeString(sig.Recv().Type(), (*types.Package).Name), fn.Name()) + } + + if decl.Body == nil { + return nil, fmt.Errorf("cannot inline function %s as it has no body", name) + } + + // Record the location of all free references in the FuncDecl. + // (Parameters are not free by this definition.) + var ( + fieldObjs = fieldObjs(sig) + freeObjIndex = make(map[types.Object]int) + freeObjs []object + freeRefs []freeRef // free refs that may need renaming + unexported []string // free refs to unexported objects, for later error checks + ) + var f func(n ast.Node, stack []ast.Node) bool + var stack []ast.Node + stack = append(stack, decl.Type) // for scope of function itself + visit := func(n ast.Node, stack []ast.Node) { astutil.PreorderStack(n, stack, f) } + f = func(n ast.Node, stack []ast.Node) bool { + switch n := n.(type) { + case *ast.SelectorExpr: + // Check selections of free fields/methods. + if sel, ok := info.Selections[n]; ok && + !within(sel.Obj().Pos(), decl) && + !n.Sel.IsExported() { + sym := fmt.Sprintf("(%s).%s", info.TypeOf(n.X), n.Sel.Name) + unexported = append(unexported, sym) + } + + // Don't recur into SelectorExpr.Sel. + visit(n.X, stack) + return false + + case *ast.CompositeLit: + // Check for struct literals that refer to unexported fields, + // whether keyed or unkeyed. (Logic assumes well-typedness.) + litType := typeparams.Deref(info.TypeOf(n)) + if s, ok := typeparams.CoreType(litType).(*types.Struct); ok { + if n.Type != nil { + visit(n.Type, stack) + } + for i, elt := range n.Elts { + var field *types.Var + var value ast.Expr + if kv, ok := elt.(*ast.KeyValueExpr); ok { + field = info.Uses[kv.Key.(*ast.Ident)].(*types.Var) + value = kv.Value + } else { + field = s.Field(i) + value = elt + } + if !within(field.Pos(), decl) && !field.Exported() { + sym := fmt.Sprintf("(%s).%s", litType, field.Name()) + unexported = append(unexported, sym) + } + + // Don't recur into KeyValueExpr.Key. + visit(value, stack) + } + return false + } + + case *ast.Ident: + if obj, ok := info.Uses[n]; ok { + // Methods and fields are handled by SelectorExpr and CompositeLit. + if isField(obj) || isMethod(obj) { + panic(obj) + } + // Inv: id is a lexical reference. + + // A reference to an unexported package-level declaration + // cannot be inlined into another package. + if !n.IsExported() && + obj.Pkg() != nil && obj.Parent() == obj.Pkg().Scope() { + unexported = append(unexported, n.Name) + } + + // Record free reference (incl. self-reference). 
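+ // (The obj == fn test matters because a self-reference resolves
+ // to an object whose Pos lies within decl, so the within check
+ // alone would miss it.)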
+ if obj == fn || !within(obj.Pos(), decl) { + objidx, ok := freeObjIndex[obj] + if !ok { + objidx = len(freeObjIndex) + var pkgPath, pkgName string + if pn, ok := obj.(*types.PkgName); ok { + pkgPath = pn.Imported().Path() + pkgName = pn.Imported().Name() + } else if obj.Pkg() != nil { + pkgPath = obj.Pkg().Path() + pkgName = obj.Pkg().Name() + } + freeObjs = append(freeObjs, object{ + Name: obj.Name(), + Kind: objectKind(obj), + PkgName: pkgName, + PkgPath: pkgPath, + ValidPos: obj.Pos().IsValid(), + }) + freeObjIndex[obj] = objidx + } + + freeObjs[objidx].Shadow = freeObjs[objidx].Shadow.add(info, fieldObjs, obj.Name(), stack) + + freeRefs = append(freeRefs, freeRef{ + Offset: int(n.Pos() - decl.Pos()), + Object: objidx, + }) + } + } + } + return true + } + visit(decl, stack) + + // Analyze callee body for "return expr" form, + // where expr is f() or <-ch. These forms are + // safe to inline as a standalone statement. + validForCallStmt := false + if len(decl.Body.List) != 1 { + // not just a return statement + } else if ret, ok := decl.Body.List[0].(*ast.ReturnStmt); ok && len(ret.Results) == 1 { + validForCallStmt = func() bool { + switch expr := ast.Unparen(ret.Results[0]).(type) { + case *ast.CallExpr: // f(x) + callee := typeutil.Callee(info, expr) + if callee == nil { + return false // conversion T(x) + } + + // The only non-void built-in functions that may be + // called as a statement are copy and recover + // (though arguably a call to recover should never + // be inlined as that changes its behavior). + if builtin, ok := callee.(*types.Builtin); ok { + return builtin.Name() == "copy" || + builtin.Name() == "recover" + } + + return true // ordinary call f() + + case *ast.UnaryExpr: // <-x + return expr.Op == token.ARROW // channel receive <-ch + } + + // No other expressions are valid statements. + return false + }() + } + + // Record information about control flow in the callee + // (but not any nested functions). + var ( + hasDefer = false + hasBareReturn = false + returnInfo [][]returnOperandFlags + labels []string + ) + ast.Inspect(decl.Body, func(n ast.Node) bool { + switch n := n.(type) { + case *ast.FuncLit: + return false // prune traversal + case *ast.DeferStmt: + hasDefer = true + case *ast.LabeledStmt: + labels = append(labels, n.Label.Name) + case *ast.ReturnStmt: + + // Are implicit assignment conversions + // to result variables all trivial? + var resultInfo []returnOperandFlags + if len(n.Results) > 0 { + argInfo := func(i int) (ast.Expr, types.Type) { + expr := n.Results[i] + return expr, info.TypeOf(expr) + } + if len(n.Results) == 1 && sig.Results().Len() > 1 { + // Spread return: return f() where f.Results > 1. + tuple := info.TypeOf(n.Results[0]).(*types.Tuple) + argInfo = func(i int) (ast.Expr, types.Type) { + return nil, tuple.At(i).Type() + } + } + for i := range sig.Results().Len() { + expr, typ := argInfo(i) + var flags returnOperandFlags + if typ == types.Typ[types.UntypedNil] { // untyped nil is preserved by go/types + flags |= untypedNilResult + } + if !trivialConversion(info.Types[expr].Value, typ, sig.Results().At(i).Type()) { + flags |= nonTrivialResult + } + resultInfo = append(resultInfo, flags) + } + } else if sig.Results().Len() > 0 { + hasBareReturn = true + } + returnInfo = append(returnInfo, resultInfo) + } + return true + }) + + // Reject attempts to inline cgo-generated functions. + for _, obj := range freeObjs { + // There are others (iconst fconst sconst fpvar macro) + // but this is probably sufficient. 
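+		// (For example, cgo generates a declaration named
+		// _Cfunc_puts for a reference to C.puts.)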
+ if strings.HasPrefix(obj.Name, "_Cfunc_") || + strings.HasPrefix(obj.Name, "_Ctype_") || + strings.HasPrefix(obj.Name, "_Cvar_") { + return nil, fmt.Errorf("cannot inline cgo-generated functions") + } + } + + // Compact content to just the FuncDecl. + // + // As a space optimization, we don't retain the complete + // callee file content; all we need is "package _; func f() { ... }". + // This reduces the size of analysis facts. + // + // Offsets in the callee information are "relocatable" + // since they are all relative to the FuncDecl. + + content = append([]byte("package _\n"), + content[offsetOf(fset, decl.Pos()):offsetOf(fset, decl.End())]...) + // Sanity check: re-parse the compacted content. + if _, _, err := parseCompact(content); err != nil { + return nil, err + } + + params, results, effects, falcon := analyzeParams(logf, fset, info, decl) + tparams := analyzeTypeParams(logf, fset, info, decl) + return &Callee{gobCallee{ + Content: content, + PkgPath: pkg.Path(), + Name: name, + Unexported: unexported, + FreeObjs: freeObjs, + FreeRefs: freeRefs, + ValidForCallStmt: validForCallStmt, + NumResults: sig.Results().Len(), + Params: params, + TypeParams: tparams, + Results: results, + Effects: effects, + HasDefer: hasDefer, + HasBareReturn: hasBareReturn, + Returns: returnInfo, + Labels: labels, + Falcon: falcon, + }}, nil +} + +// parseCompact parses a Go source file of the form "package _\n func f() { ... }" +// and returns the sole function declaration. +func parseCompact(content []byte) (*token.FileSet, *ast.FuncDecl, error) { + fset := token.NewFileSet() + const mode = parser.ParseComments | parser.SkipObjectResolution | parser.AllErrors + f, err := parser.ParseFile(fset, "callee.go", content, mode) + if err != nil { + return nil, nil, fmt.Errorf("internal error: cannot compact file: %v", err) + } + return fset, f.Decls[0].(*ast.FuncDecl), nil +} + +// A paramInfo records information about a callee receiver, parameter, or result variable. +type paramInfo struct { + Name string // parameter name (may be blank, or even "") + Index int // index within signature + IsResult bool // false for receiver or parameter, true for result variable + IsInterface bool // parameter has a (non-type parameter) interface type + Assigned bool // parameter appears on left side of an assignment statement + Escapes bool // parameter has its address taken + Refs []refInfo // information about references to parameter within body + Shadow shadowMap // shadowing info for the above refs; see [shadowMap] + FalconType string // name of this parameter's type (if basic) in the falcon system +} + +type refInfo struct { + Offset int // FuncDecl-relative byte offset of parameter ref within body + Assignable bool // ref appears in context of assignment to known type + IfaceAssignment bool // ref is being assigned to an interface + AffectsInference bool // ref type may affect type inference + // IsSelectionOperand indicates whether the parameter reference is the + // operand of a selection (param.f). If so, and param's argument is itself + // a receiver parameter (a common case), we don't need to desugar (&v or *ptr) + // the selection: if param.Method is a valid selection, then so is param.fieldOrMethod. + IsSelectionOperand bool +} + +// analyzeParams computes information about parameters of the function declared by decl, +// including a simple "address taken" escape analysis. +// +// It returns two new arrays, one of the receiver and parameters, and +// the other of the result variables of the function. 
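+//
+// For example, for a method
+//
+//	func (r *T) f(x int) (y int)
+//
+// the first array describes r and x, and the second describes y.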
+//
+// The input must be well-typed.
+func analyzeParams(logf func(string, ...any), fset *token.FileSet, info *types.Info, decl *ast.FuncDecl) (params, results []*paramInfo, effects []int, _ falconResult) {
+	sig := signature(fset, info, decl)
+
+	paramInfos := make(map[*types.Var]*paramInfo)
+	{
+		newParamInfo := func(param *types.Var, isResult bool) *paramInfo {
+			info := &paramInfo{
+				Name:        param.Name(),
+				IsResult:    isResult,
+				Index:       len(paramInfos),
+				IsInterface: isNonTypeParamInterface(param.Type()),
+			}
+			paramInfos[param] = info
+			return info
+		}
+		if sig.Recv() != nil {
+			params = append(params, newParamInfo(sig.Recv(), false))
+		}
+		for i := 0; i < sig.Params().Len(); i++ {
+			params = append(params, newParamInfo(sig.Params().At(i), false))
+		}
+		for i := 0; i < sig.Results().Len(); i++ {
+			results = append(results, newParamInfo(sig.Results().At(i), true))
+		}
+	}
+
+	// Search function body for operations &x, x.f(), and x = y
+	// where x is a parameter, and record it.
+	escape(info, decl, func(v *types.Var, escapes bool) {
+		if info := paramInfos[v]; info != nil {
+			if escapes {
+				info.Escapes = true
+			} else {
+				info.Assigned = true
+			}
+		}
+	})
+
+	// Record locations of all references to parameters.
+	// And record the set of intervening definitions for each parameter.
+	//
+	// TODO(adonovan): combine this traversal with the one that computes
+	// FreeRefs. The tricky part is that calleefx needs this one first.
+	fieldObjs := fieldObjs(sig)
+	var stack []ast.Node
+	stack = append(stack, decl.Type) // for scope of function itself
+	astutil.PreorderStack(decl.Body, stack, func(n ast.Node, stack []ast.Node) bool {
+		if id, ok := n.(*ast.Ident); ok {
+			if v, ok := info.Uses[id].(*types.Var); ok {
+				if pinfo, ok := paramInfos[v]; ok {
+					// Record ref information, and any intervening (shadowing) names.
+					//
+					// If the parameter v has an interface type, and the reference id
+					// appears in a context where assignability rules apply, there may be
+					// an implicit interface-to-interface widening. In that case it is
+					// not necessary to insert an explicit conversion from the argument
+					// to the parameter's type.
+					//
+					// Contrapositively, if param is not an interface type, then the
+					// assignment may lose type information, for example in the case that
+					// the substituted expression is an untyped constant or unnamed type.
+					stack = append(stack, n) // (the two calls below want n)
+					assignable, ifaceAssign, affectsInference := analyzeAssignment(info, stack)
+					ref := refInfo{
+						Offset:             int(n.Pos() - decl.Pos()),
+						Assignable:         assignable,
+						IfaceAssignment:    ifaceAssign,
+						AffectsInference:   affectsInference,
+						IsSelectionOperand: isSelectionOperand(stack),
+					}
+					pinfo.Refs = append(pinfo.Refs, ref)
+					pinfo.Shadow = pinfo.Shadow.add(info, fieldObjs, pinfo.Name, stack)
+				}
+			}
+		}
+		return true
+	})
+
+	// Compute subset and order of parameters that are strictly evaluated.
+	// (Depends on Refs computed above.)
+	effects = calleefx(info, decl.Body, paramInfos)
+	logf("effects list = %v", effects)
+
+	falcon := falcon(logf, fset, paramInfos, info, decl)
+
+	return params, results, effects, falcon
+}
+
+// analyzeTypeParams computes information about the type parameters of the function declared by decl.
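+// For example, in a declaration such as
+//
+//	func f[S ~[]E, E any](s S) E
+//
+// (a hypothetical signature, for illustration), the type parameters
+// are S and E.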
+func analyzeTypeParams(_ logger, fset *token.FileSet, info *types.Info, decl *ast.FuncDecl) []*paramInfo {
+	sig := signature(fset, info, decl)
+	paramInfos := make(map[*types.TypeName]*paramInfo)
+	var params []*paramInfo
+	collect := func(tpl *types.TypeParamList) {
+		for i := range tpl.Len() {
+			typeName := tpl.At(i).Obj()
+			info := &paramInfo{Name: typeName.Name()}
+			params = append(params, info)
+			paramInfos[typeName] = info
+		}
+	}
+	collect(sig.RecvTypeParams())
+	collect(sig.TypeParams())
+
+	// Find references.
+	// We don't care about most of the properties that matter for parameter references:
+	// a type is immutable, cannot have its address taken, and does not undergo conversions.
+	// TODO(jba): can we nevertheless combine this with the traversal in analyzeParams?
+	var stack []ast.Node
+	stack = append(stack, decl.Type) // for scope of function itself
+	astutil.PreorderStack(decl.Body, stack, func(n ast.Node, stack []ast.Node) bool {
+		if id, ok := n.(*ast.Ident); ok {
+			if v, ok := info.Uses[id].(*types.TypeName); ok {
+				if pinfo, ok := paramInfos[v]; ok {
+					ref := refInfo{Offset: int(n.Pos() - decl.Pos())}
+					pinfo.Refs = append(pinfo.Refs, ref)
+					pinfo.Shadow = pinfo.Shadow.add(info, nil, pinfo.Name, stack)
+				}
+			}
+		}
+		return true
+	})
+	return params
+}
+
+func signature(fset *token.FileSet, info *types.Info, decl *ast.FuncDecl) *types.Signature {
+	fnobj, ok := info.Defs[decl.Name]
+	if !ok {
+		panic(fmt.Sprintf("%s: no func object for %q",
+			fset.PositionFor(decl.Name.Pos(), false), decl.Name)) // ill-typed?
+	}
+	return fnobj.Type().(*types.Signature)
+}
+
+// -- callee helpers --
+
+// analyzeAssignment looks at the given stack, and analyzes certain
+// attributes of the innermost expression.
+//
+// In all cases we 'fail closed' when we cannot detect (or for simplicity
+// choose not to detect) the condition in question, meaning we err on the side
+// of the more restrictive rule. This is noted for each result below.
+//
+//   - assignable reports whether the expression is used in a position where
+//     assignability rules apply, such as in an actual assignment, as call
+//     argument, or in a send to a channel. Defaults to 'false'. If assignable
+//     is false, the other two results are irrelevant.
+//   - ifaceAssign reports whether that assignment is to an interface type.
+//     This is important as we want to preserve the concrete type in that
+//     assignment. Defaults to 'true'. Notably, if the assigned type is a type
+//     parameter, we assume that it could have interface type.
+//   - affectsInference is (somewhat vaguely) defined as whether or not the
+//     type of the operand may affect the type of the surrounding syntax,
+//     through type inference. It is infeasible to completely reverse engineer
+//     type inference, so we over-approximate: if the expression is an argument
+//     to a call to a generic function (but not method!) that uses type
+//     parameters, assume that unification of that argument may affect the
+//     inferred types.
+func analyzeAssignment(info *types.Info, stack []ast.Node) (assignable, ifaceAssign, affectsInference bool) {
+	remaining, parent, expr := exprContext(stack)
+	if parent == nil {
+		return false, false, false
+	}
+
+	// TODO(golang/go#70638): simplify when types.Info records implicit conversions.
+
+	// Types do not need to match for assignment to a variable.
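+	// Illustration: in an assignment such as
+	//
+	//	var w io.Writer
+	//	w = param
+	//
+	// the reference is assignable, and since w has interface type,
+	// the argument's concrete type must be preserved.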
+ if assign, ok := parent.(*ast.AssignStmt); ok { + for i, v := range assign.Rhs { + if v == expr { + if i >= len(assign.Lhs) { + return false, false, false // ill typed + } + // Check to see if the assignment is to an interface type. + if i < len(assign.Lhs) { + // TODO: We could handle spread calls here, but in current usage expr + // is an ident. + if id, _ := assign.Lhs[i].(*ast.Ident); id != nil && info.Defs[id] != nil { + // Types must match for a defining identifier in a short variable + // declaration. + return false, false, false + } + // In all other cases, types should be known. + typ := info.TypeOf(assign.Lhs[i]) + return true, typ == nil || types.IsInterface(typ), false + } + // Default: + return assign.Tok == token.ASSIGN, true, false + } + } + } + + // Types do not need to match for an initializer with known type. + if spec, ok := parent.(*ast.ValueSpec); ok && spec.Type != nil { + if slices.Contains(spec.Values, expr) { + typ := info.TypeOf(spec.Type) + return true, typ == nil || types.IsInterface(typ), false + } + } + + // Types do not need to match for index expressions. + if ix, ok := parent.(*ast.IndexExpr); ok { + if ix.Index == expr { + typ := info.TypeOf(ix.X) + if typ == nil { + return true, true, false + } + m, _ := typeparams.CoreType(typ).(*types.Map) + return true, m == nil || types.IsInterface(m.Key()), false + } + } + + // Types do not need to match for composite literal keys, values, or + // fields. + if kv, ok := parent.(*ast.KeyValueExpr); ok { + var under types.Type + if len(remaining) > 0 { + if complit, ok := remaining[len(remaining)-1].(*ast.CompositeLit); ok { + if typ := info.TypeOf(complit); typ != nil { + // Unpointer to allow for pointers to slices or arrays, which are + // permitted as the types of nested composite literals without a type + // name. + under = typesinternal.Unpointer(typeparams.CoreType(typ)) + } + } + } + if kv.Key == expr { // M{expr: ...}: assign to map key + m, _ := under.(*types.Map) + return true, m == nil || types.IsInterface(m.Key()), false + } + if kv.Value == expr { + switch under := under.(type) { + case interface{ Elem() types.Type }: // T{...: expr}: assign to map/array/slice element + return true, types.IsInterface(under.Elem()), false + case *types.Struct: // Struct{k: expr} + if id, _ := kv.Key.(*ast.Ident); id != nil { + for fi := range under.NumFields() { + field := under.Field(fi) + if info.Uses[id] == field { + return true, types.IsInterface(field.Type()), false + } + } + } + default: + return true, true, false + } + } + } + if lit, ok := parent.(*ast.CompositeLit); ok { + for i, v := range lit.Elts { + if v == expr { + typ := info.TypeOf(lit) + if typ == nil { + return true, true, false + } + // As in the KeyValueExpr case above, unpointer to handle pointers to + // array/slice literals. + under := typesinternal.Unpointer(typeparams.CoreType(typ)) + switch under := under.(type) { + case interface{ Elem() types.Type }: // T{expr}: assign to map/array/slice element + return true, types.IsInterface(under.Elem()), false + case *types.Struct: // Struct{expr}: assign to unkeyed struct field + if i < under.NumFields() { + return true, types.IsInterface(under.Field(i).Type()), false + } + } + return true, true, false + } + } + } + + // Types do not need to match for values sent to a channel. 
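+	// Illustration:
+	//
+	//	var ch chan any
+	//	ch <- param // assignable; the element type (any) is an interface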
+ if send, ok := parent.(*ast.SendStmt); ok { + if send.Value == expr { + typ := info.TypeOf(send.Chan) + if typ == nil { + return true, true, false + } + ch, _ := typeparams.CoreType(typ).(*types.Chan) + return true, ch == nil || types.IsInterface(ch.Elem()), false + } + } + + // Types do not need to match for an argument to a call, unless the + // corresponding parameter has type parameters, as in that case the + // argument type may affect inference. + if call, ok := parent.(*ast.CallExpr); ok { + if _, ok := isConversion(info, call); ok { + return false, false, false // redundant conversions are handled at the call site + } + // Ordinary call. Could be a call of a func, builtin, or function value. + for i, arg := range call.Args { + if arg == expr { + typ := info.TypeOf(call.Fun) + if typ == nil { + return true, true, false + } + sig, _ := typeparams.CoreType(typ).(*types.Signature) + if sig != nil { + // Find the relevant parameter type, accounting for variadics. + paramType := paramTypeAtIndex(sig, call, i) + ifaceAssign := paramType == nil || types.IsInterface(paramType) + affectsInference := false + if fn := typeutil.StaticCallee(info, call); fn != nil { + if sig2 := fn.Type().(*types.Signature); sig2.Recv() == nil { + originParamType := paramTypeAtIndex(sig2, call, i) + affectsInference = originParamType == nil || new(typeparams.Free).Has(originParamType) + } + } + return true, ifaceAssign, affectsInference + } + } + } + } + + return false, false, false +} + +// paramTypeAtIndex returns the effective parameter type at the given argument +// index in call, if valid. +func paramTypeAtIndex(sig *types.Signature, call *ast.CallExpr, index int) types.Type { + if plen := sig.Params().Len(); sig.Variadic() && index >= plen-1 && !call.Ellipsis.IsValid() { + if s, ok := sig.Params().At(plen - 1).Type().(*types.Slice); ok { + return s.Elem() + } + } else if index < plen { + return sig.Params().At(index).Type() + } + return nil // ill typed +} + +// exprContext returns the innermost parent->child expression nodes for the +// given outer-to-inner stack, after stripping parentheses, along with the +// remaining stack up to the parent node. +// +// If no such context exists, returns (nil, nil, nil). +func exprContext(stack []ast.Node) (remaining []ast.Node, parent ast.Node, expr ast.Expr) { + expr, _ = stack[len(stack)-1].(ast.Expr) + if expr == nil { + return nil, nil, nil + } + i := len(stack) - 2 + for ; i >= 0; i-- { + if pexpr, ok := stack[i].(*ast.ParenExpr); ok { + expr = pexpr + } else { + parent = stack[i] + break + } + } + if parent == nil { + return nil, nil, nil + } + // inv: i is the index of parent in the stack. + return stack[:i], parent, expr +} + +// isSelectionOperand reports whether the innermost node of stack is operand +// (x) of a selection x.f. +func isSelectionOperand(stack []ast.Node) bool { + _, parent, expr := exprContext(stack) + if parent == nil { + return false + } + sel, ok := parent.(*ast.SelectorExpr) + return ok && sel.X == expr +} + +// A shadowMap records information about shadowing at any of the parameter's +// references within the callee decl. +// +// For each name shadowed at a reference to the parameter within the callee +// body, shadow map records the 1-based index of the callee decl parameter +// causing the shadowing, or -1, if the shadowing is not due to a callee decl. +// A value of zero (or missing) indicates no shadowing. By convention, +// self-shadowing is excluded from the map. 
+// +// For example, in the following callee +// +// func f(a, b int) int { +// c := 2 + b +// return a + c +// } +// +// the shadow map of a is {b: 2, c: -1}, because b is shadowed by the 2nd +// parameter. The shadow map of b is {a: 1}, because c is not shadowed at the +// use of b. +type shadowMap map[string]int + +// add returns the [shadowMap] augmented by the set of names +// locally shadowed at the location of the reference in the callee +// (identified by the stack). The name of the reference itself is +// excluded. +// +// These shadowed names may not be used in a replacement expression +// for the reference. +func (s shadowMap) add(info *types.Info, paramIndexes map[types.Object]int, exclude string, stack []ast.Node) shadowMap { + for _, n := range stack { + if scope := scopeFor(info, n); scope != nil { + for _, name := range scope.Names() { + if name != exclude { + if s == nil { + s = make(shadowMap) + } + obj := scope.Lookup(name) + if idx, ok := paramIndexes[obj]; ok { + s[name] = idx + 1 + } else { + s[name] = -1 + } + } + } + } + } + return s +} + +// fieldObjs returns a map of each types.Object defined by the given signature +// to its index in the parameter list. Parameters with missing or blank name +// are skipped. +func fieldObjs(sig *types.Signature) map[types.Object]int { + m := make(map[types.Object]int) + for i := range sig.Params().Len() { + if p := sig.Params().At(i); p.Name() != "" && p.Name() != "_" { + m[p] = i + } + } + return m +} + +func isField(obj types.Object) bool { + if v, ok := obj.(*types.Var); ok && v.IsField() { + return true + } + return false +} + +func isMethod(obj types.Object) bool { + if f, ok := obj.(*types.Func); ok && f.Type().(*types.Signature).Recv() != nil { + return true + } + return false +} + +// -- serialization -- + +var ( + _ gob.GobEncoder = (*Callee)(nil) + _ gob.GobDecoder = (*Callee)(nil) +) + +func (callee *Callee) GobEncode() ([]byte, error) { + var out bytes.Buffer + if err := gob.NewEncoder(&out).Encode(callee.impl); err != nil { + return nil, err + } + return out.Bytes(), nil +} + +func (callee *Callee) GobDecode(data []byte) error { + return gob.NewDecoder(bytes.NewReader(data)).Decode(&callee.impl) +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/calleefx.go b/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/calleefx.go new file mode 100644 index 00000000000..001bf61e93e --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/calleefx.go @@ -0,0 +1,349 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inline + +// This file defines the analysis of callee effects. + +import ( + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/internal/typesinternal" +) + +const ( + rinf = -1 // R∞: arbitrary read from memory + winf = -2 // W∞: arbitrary write to memory (or unknown control) +) + +// calleefx returns a list of parameter indices indicating the order +// in which parameters are first referenced during evaluation of the +// callee, relative both to each other and to other effects of the +// callee (if any), such as arbitrary reads (rinf) and arbitrary +// effects (winf), including unknown control flow. Each parameter +// that is referenced appears once in the list. 
+//
+// For example, the effects list of this function:
+//
+//	func f(x, y, z int) int {
+//		return y + x + g() + z
+//	}
+//
+// is [1 0 -2 2], indicating reads of y and x, followed by the unknown
+// effects of the g() call, and finally the read of parameter z. This
+// information is used during inlining to ascertain when it is safe
+// for parameter references to be replaced by their corresponding
+// argument expressions. Such substitutions are permitted only when
+// they do not cause "write" operations (those with effects) to
+// commute with "read" operations (those that have no effect but are
+// not pure). Impure operations may be reordered with other impure
+// operations, and pure operations may be reordered arbitrarily.
+//
+// The analysis ignores the effects of runtime panics, on the
+// assumption that well-behaved programs shouldn't encounter them.
+func calleefx(info *types.Info, body *ast.BlockStmt, paramInfos map[*types.Var]*paramInfo) []int {
+	// This traversal analyzes the callee's statements (in syntax
+	// form, though one could do better with SSA) to compute the
+	// sequence of events of the following kinds:
+	//
+	// 1. reads of a parameter variable.
+	// 2. reads from other memory.
+	// 3. writes to memory.
+
+	var effects []int // indices of parameters, or rinf/winf (-ve)
+	seen := make(map[int]bool)
+	effect := func(i int) {
+		if !seen[i] {
+			seen[i] = true
+			effects = append(effects, i)
+		}
+	}
+
+	// unknown is called for statements of unknown effects (or control).
+	unknown := func() {
+		effect(winf)
+
+		// Ensure that all remaining parameters are "seen"
+		// after we go into the unknown (unless they are
+		// unreferenced by the function body). This lets us
+		// not bother implementing the complete traversal into
+		// control structures.
+		//
+		// TODO(adonovan): add them in a deterministic order.
+		// (This is not a bug but determinism is good.)
+		for _, pinfo := range paramInfos {
+			if !pinfo.IsResult && len(pinfo.Refs) > 0 {
+				effect(pinfo.Index)
+			}
+		}
+	}
+
+	var visitExpr func(n ast.Expr)
+	var visitStmt func(n ast.Stmt) bool
+	visitExpr = func(n ast.Expr) {
+		switch n := n.(type) {
+		case *ast.Ident:
+			if v, ok := info.Uses[n].(*types.Var); ok && !v.IsField() {
+				// Use of global?
+				if v.Parent() == v.Pkg().Scope() {
+					effect(rinf) // read global var
+				}
+
+				// Use of parameter?
+				if pinfo, ok := paramInfos[v]; ok && !pinfo.IsResult {
+					effect(pinfo.Index) // read parameter var
+				}
+
+				// Use of local variables is ok.
+			}
+
+		case *ast.BasicLit:
+			// no effect
+
+		case *ast.FuncLit:
+			// A func literal has no read or write effect
+			// until called, and (most) function calls are
+			// considered to have arbitrary effects.
+			// So, no effect.
+
+		case *ast.CompositeLit:
+			for _, elt := range n.Elts {
+				visitExpr(elt) // note: visits KeyValueExpr
+			}
+
+		case *ast.ParenExpr:
+			visitExpr(n.X)
+
+		case *ast.SelectorExpr:
+			if seln, ok := info.Selections[n]; ok {
+				visitExpr(n.X)
+
+				// See types.SelectionKind for background.
+				switch seln.Kind() {
+				case types.MethodExpr:
+					// A method expression T.f acts like a
+					// reference to a func decl,
+					// so it doesn't read x until called.
+
+				case types.MethodVal, types.FieldVal:
+					// A field or method value selection x.f
+					// reads x if the selection indirects a pointer.
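+					//
+					// Illustration: if x is a *T, then x.f loads
+					// through the pointer, a read from memory (rinf);
+					// if x is a plain T, x.f reads no memory beyond
+					// x itself.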
+
+					if indirectSelection(seln) {
+						effect(rinf)
+					}
+				}
+			} else {
+				// qualified identifier: treat like unqualified
+				visitExpr(n.Sel)
+			}
+
+		case *ast.IndexExpr:
+			if tv := info.Types[n.Index]; tv.IsType() {
+				// no effect (G[T] instantiation)
+			} else {
+				visitExpr(n.X)
+				visitExpr(n.Index)
+				switch info.TypeOf(n.X).Underlying().(type) {
+				case *types.Slice, *types.Pointer: // []T, *[n]T (not string, [n]T)
+					effect(rinf) // indirect read of slice/array element
+				}
+			}
+
+		case *ast.IndexListExpr:
+			// no effect (M[K,V] instantiation)
+
+		case *ast.SliceExpr:
+			visitExpr(n.X)
+			visitExpr(n.Low)
+			visitExpr(n.High)
+			visitExpr(n.Max)
+
+		case *ast.TypeAssertExpr:
+			visitExpr(n.X)
+
+		case *ast.CallExpr:
+			if info.Types[n.Fun].IsType() {
+				// conversion T(x)
+				visitExpr(n.Args[0])
+			} else {
+				// call f(args)
+				visitExpr(n.Fun)
+				for i, arg := range n.Args {
+					if i == 0 && info.Types[arg].IsType() {
+						continue // new(T), make(T, n)
+					}
+					visitExpr(arg)
+				}
+
+				// The pure built-ins have no effects beyond
+				// those of their operands (not even memory reads).
+				// All other calls have unknown effects.
+				if !typesinternal.CallsPureBuiltin(info, n) {
+					unknown() // arbitrary effects
+				}
+			}
+
+		case *ast.StarExpr:
+			visitExpr(n.X)
+			effect(rinf) // *ptr load or store depends on state of heap
+
+		case *ast.UnaryExpr: // + - ! ^ & ~ <-
+			visitExpr(n.X)
+			if n.Op == token.ARROW {
+				unknown() // effect: channel receive
+			}
+
+		case *ast.BinaryExpr:
+			visitExpr(n.X)
+			visitExpr(n.Y)
+
+		case *ast.KeyValueExpr:
+			visitExpr(n.Key) // may be a struct field
+			visitExpr(n.Value)
+
+		case *ast.BadExpr:
+			// no effect
+
+		case nil:
+			// optional subtree
+
+		default:
+			// type syntax: unreachable given traversal
+			panic(n)
+		}
+	}
+
+	// visitStmt's result indicates the continuation:
+	// false for return, true for the next statement.
+	//
+	// We could treat return as an unknown, but this way
+	// yields definite effects for simple sequences like
+	// {S1; S2; return}, so unreferenced parameters are
+	// not spuriously added to the effects list, and thus
+	// not spuriously disqualified from elimination.
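+	//
+	// For example, for
+	//
+	//	func f(x, y int) { println(x); return }
+	//
+	// the effects list is [0 -2]: a read of x, then the unknown
+	// effects of the println call; y is never referenced, so it
+	// does not appear and remains eligible for substitution.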
+ visitStmt = func(n ast.Stmt) bool { + switch n := n.(type) { + case *ast.DeclStmt: + decl := n.Decl.(*ast.GenDecl) + for _, spec := range decl.Specs { + switch spec := spec.(type) { + case *ast.ValueSpec: + for _, v := range spec.Values { + visitExpr(v) + } + + case *ast.TypeSpec: + // no effect + } + } + + case *ast.LabeledStmt: + return visitStmt(n.Stmt) + + case *ast.ExprStmt: + visitExpr(n.X) + + case *ast.SendStmt: + visitExpr(n.Chan) + visitExpr(n.Value) + unknown() // effect: channel send + + case *ast.IncDecStmt: + visitExpr(n.X) + unknown() // effect: variable increment + + case *ast.AssignStmt: + for _, lhs := range n.Lhs { + visitExpr(lhs) + } + for _, rhs := range n.Rhs { + visitExpr(rhs) + } + for _, lhs := range n.Lhs { + id, _ := lhs.(*ast.Ident) + if id != nil && id.Name == "_" { + continue // blank assign has no effect + } + if n.Tok == token.DEFINE && id != nil && info.Defs[id] != nil { + continue // new var declared by := has no effect + } + unknown() // assignment to existing var + break + } + + case *ast.GoStmt: + visitExpr(n.Call.Fun) + for _, arg := range n.Call.Args { + visitExpr(arg) + } + unknown() // effect: create goroutine + + case *ast.DeferStmt: + visitExpr(n.Call.Fun) + for _, arg := range n.Call.Args { + visitExpr(arg) + } + unknown() // effect: push defer + + case *ast.ReturnStmt: + for _, res := range n.Results { + visitExpr(res) + } + return false + + case *ast.BlockStmt: + for _, stmt := range n.List { + if !visitStmt(stmt) { + return false + } + } + + case *ast.BranchStmt: + unknown() // control flow + + case *ast.IfStmt: + visitStmt(n.Init) + visitExpr(n.Cond) + unknown() // control flow + + case *ast.SwitchStmt: + visitStmt(n.Init) + visitExpr(n.Tag) + unknown() // control flow + + case *ast.TypeSwitchStmt: + visitStmt(n.Init) + visitStmt(n.Assign) + unknown() // control flow + + case *ast.SelectStmt: + unknown() // control flow + + case *ast.ForStmt: + visitStmt(n.Init) + visitExpr(n.Cond) + unknown() // control flow + + case *ast.RangeStmt: + visitExpr(n.X) + unknown() // control flow + + case *ast.EmptyStmt, *ast.BadStmt: + // no effect + + case nil: + // optional subtree + + default: + panic(n) + } + return true + } + visitStmt(body) + + return effects +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/doc.go b/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/doc.go new file mode 100644 index 00000000000..6bb4cef055d --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/doc.go @@ -0,0 +1,288 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package inline implements inlining of Go function calls. + +The client provides information about the caller and callee, +including the source text, syntax tree, and type information, and +the inliner returns the modified source file for the caller, or an +error if the inlining operation is invalid (for example because the +function body refers to names that are inaccessible to the caller). + +Although this interface demands more information from the client +than might seem necessary, it enables smoother integration with +existing batch and interactive tools that have their own ways of +managing the processes of reading, parsing, and type-checking +packages. In particular, this package does not assume that the +caller and callee belong to the same token.FileSet or +types.Importer realms. 
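+
+For example, a client that has already loaded and type-checked a
+package might summarize a callee as follows. (This is only a sketch:
+the fset, pkg, info, decl, and content values are assumed to be
+supplied by the client's own loading machinery.)
+
+	callee, err := inline.AnalyzeCallee(log.Printf, fset, pkg, info, decl, content)
+	if err != nil {
+		return err
+	}
+	// The Callee is gob-serializable, so it may be cached or
+	// recorded as an analysis fact for later calls to Inline.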
+ +There are many aspects to a function call. It is the only construct +that can simultaneously bind multiple variables of different +explicit types, with implicit assignment conversions. (Neither var +nor := declarations can do that.) It defines the scope of control +labels, of return statements, and of defer statements. Arguments +and results of function calls may be tuples even though tuples are +not first-class values in Go, and a tuple-valued call expression +may be "spread" across the argument list of a call or the operands +of a return statement. All these unique features mean that in the +general case, not everything that can be expressed by a function +call can be expressed without one. + +So, in general, inlining consists of modifying a function or method +call expression f(a1, ..., an) so that the name of the function f +is replaced ("literalized") by a literal copy of the function +declaration, with free identifiers suitably modified to use the +locally appropriate identifiers or perhaps constant argument +values. + +Inlining must not change the semantics of the call. Semantics +preservation is crucial for clients such as codebase maintenance +tools that automatically inline all calls to designated functions +on a large scale. Such tools must not introduce subtle behavior +changes. (Fully inlining a call is dynamically observable using +reflection over the call stack, but this exception to the rule is +explicitly allowed.) + +In many cases it is possible to entirely replace ("reduce") the +call by a copy of the function's body in which parameters have been +replaced by arguments. The inliner supports a number of reduction +strategies, and we expect this set to grow. Nonetheless, sound +reduction is surprisingly tricky. + +The inliner is in some ways like an optimizing compiler. A compiler +is considered correct if it doesn't change the meaning of the +program in translation from source language to target language. An +optimizing compiler exploits the particulars of the input to +generate better code, where "better" usually means more efficient. +When a case is found in which it emits suboptimal code, the +compiler is improved to recognize more cases, or more rules, and +more exceptions to rules; this process has no end. Inlining is +similar except that "better" code means tidier code. The baseline +translation (literalization) is correct, but there are endless +rules--and exceptions to rules--by which the output can be +improved. + +The following section lists some of the challenges, and ways in +which they can be addressed. + + - All effects of the call argument expressions must be preserved, + both in their number (they must not be eliminated or repeated), + and in their order (both with respect to other arguments, and any + effects in the callee function). + + This must be the case even if the corresponding parameters are + never referenced, are referenced multiple times, referenced in + a different order from the arguments, or referenced within a + nested function that may be executed an arbitrary number of + times. + + Currently, parameter replacement is not applied to arguments + with effects, but with further analysis of the sequence of + strict effects within the callee we could relax this constraint. + + - When not all parameters can be substituted by their arguments + (e.g. 
due to possible effects), if the call appears in a + statement context, the inliner may introduce a var declaration + that declares the parameter variables (with the correct types) + and assigns them to their corresponding argument values. + The rest of the function body may then follow. + For example, the call + + f(1, 2) + + to the function + + func f(x, y int32) { stmts } + + may be reduced to + + { var x, y int32 = 1, 2; stmts }. + + There are many reasons why this is not always possible. For + example, true parameters are statically resolved in the same + scope, and are dynamically assigned their arguments in + parallel; but each spec in a var declaration is statically + resolved in sequence and dynamically executed in sequence, so + earlier parameters may shadow references in later ones. + + - Even an argument expression as simple as ptr.x may not be + referentially transparent, because another argument may have the + effect of changing the value of ptr. + + This constraint could be relaxed by some kind of alias or + escape analysis that proves that ptr cannot be mutated during + the call. + + - Although constants are referentially transparent, as a matter of + style we do not wish to duplicate literals that are referenced + multiple times in the body because this undoes proper factoring. + Also, string literals may be arbitrarily large. + + - If the function body consists of statements other than just + "return expr", in some contexts it may be syntactically + impossible to reduce the call. Consider: + + if x := f(); cond { ... } + + Go has no equivalent to Lisp's progn or Rust's blocks, + nor ML's let expressions (let param = arg in body); + its closest equivalent is func(param){body}(arg). + Reduction strategies must therefore consider the syntactic + context of the call. + + In such situations we could work harder to extract a statement + context for the call, by transforming it to: + + { x := f(); if cond { ... } } + + - Similarly, without the equivalent of Rust-style blocks and + first-class tuples, there is no general way to reduce a call + to a function such as + + func(params)(args)(results) { stmts; return expr } + + to an expression such as + + { var params = args; stmts; expr } + + or even a statement such as + + results = { var params = args; stmts; expr } + + Consequently the declaration and scope of the result variables, + and the assignment and control-flow implications of the return + statement, must be dealt with by cases. + + - A standalone call statement that calls a function whose body is + "return expr" cannot be simply replaced by the body expression + if it is not itself a call or channel receive expression; it is + necessary to explicitly discard the result using "_ = expr". + + Similarly, if the body is a call expression, only calls to some + built-in functions with no result (such as copy or panic) are + permitted as statements, whereas others (such as append) return + a result that must be used, even if just by discarding. + + - If a parameter or result variable is updated by an assignment + within the function body, it cannot always be safely replaced + by a variable in the caller. For example, given + + func f(a int) int { a++; return a } + + The call y = f(x) cannot be replaced by { x++; y = x } because + this would change the value of the caller's variable x. + Only if the caller is finished with x is this safe. 
+ + A similar argument applies to parameter or result variables + that escape: by eliminating a variable, inlining would change + the identity of the variable that escapes. + + - If the function body uses 'defer' and the inlined call is not a + tail-call, inlining may delay the deferred effects. + + - Because the scope of a control label is the entire function, a + call cannot be reduced if the caller and callee have intersecting + sets of control labels. (It is possible to α-rename any + conflicting ones, but our colleagues building C++ refactoring + tools report that, when tools must choose new identifiers, they + generally do a poor job.) + + - Given + + func f() uint8 { return 0 } + + var x any = f() + + reducing the call to var x any = 0 is unsound because it + discards the implicit conversion to uint8. We may need to make + each argument-to-parameter conversion explicit if the types + differ. Assignments to variadic parameters may need to + explicitly construct a slice. + + An analogous problem applies to the implicit assignments in + return statements: + + func g() any { return f() } + + Replacing the call f() with 0 would silently lose a + conversion to uint8 and change the behavior of the program. + + - When inlining a call f(1, x, g()) where those parameters are + unreferenced, we should be able to avoid evaluating 1 and x + since they are pure and thus have no effect. But x may be the + last reference to a local variable in the caller, so removing + it would cause a compilation error. Parameter substitution must + avoid making the caller's local variables unreferenced (or must + be prepared to eliminate the declaration too---this is where an + iterative framework for simplification would really help). + + - An expression such as s[i] may be valid if s and i are + variables but invalid if either or both of them are constants. + For example, a negative constant index s[-1] is always out of + bounds, and even a non-negative constant index may be out of + bounds depending on the particular string constant (e.g. + "abc"[4]). + + So, if a parameter participates in any expression that is + subject to additional compile-time checks when its operands are + constant, it may be unsafe to substitute that parameter by a + constant argument value (#62664). + +More complex callee functions are inlinable with more elaborate and +invasive changes to the statements surrounding the call expression. + +TODO(adonovan): future work: + + - Handle more of the above special cases by careful analysis, + thoughtful factoring of the large design space, and thorough + test coverage. + + - Compute precisely (not conservatively) when parameter + substitution would remove the last reference to a caller local + variable, and blank out the local instead of retreating from + the substitution. + + - Afford the client more control such as a limit on the total + increase in line count, or a refusal to inline using the + general approach (replacing name by function literal). This + could be achieved by returning metadata alongside the result + and having the client conditionally discard the change. + + - Support inlining of generic functions, replacing type parameters + by their instantiations. + + - Support inlining of calls to function literals ("closures"). + But note that the existing algorithm makes widespread assumptions + that the callee is a package-level function or method. + + - Eliminate explicit conversions of "untyped" literals inserted + conservatively when they are redundant. 
For example, the + conversion int32(1) is redundant when this value is used only as a + slice index; but it may be crucial if it is used in x := int32(1) + as it changes the type of x, which may have further implications. + The conversions may also be important to the falcon analysis. + + - Allow non-'go' build systems such as Bazel/Blaze a chance to + decide whether an import is accessible using logic other than + "/internal/" path segments. This could be achieved by returning + the list of added import paths instead of a text diff. + + - Inlining a function from another module may change the + effective version of the Go language spec that governs it. We + should probably make the client responsible for rejecting + attempts to inline from newer callees to older callers, since + there's no way for this package to access module versions. + + - Use an alternative implementation of the import-organizing + operation that doesn't require operating on a complete file + (and reformatting). Then return the results in a higher-level + form as a set of import additions and deletions plus a single + diff that encloses the call expression. This interface could + perhaps be implemented atop imports.Process by post-processing + its result to obtain the abstract import changes and discarding + its formatted output. +*/ +package inline diff --git a/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/escape.go b/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/escape.go new file mode 100644 index 00000000000..45cce11a9e2 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/escape.go @@ -0,0 +1,102 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inline + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" +) + +// escape implements a simple "address-taken" escape analysis. It +// calls f for each local variable that appears on the left side of an +// assignment (escapes=false) or has its address taken (escapes=true). +// The initialization of a variable by its declaration does not count +// as an assignment. +func escape(info *types.Info, root ast.Node, f func(v *types.Var, escapes bool)) { + + // lvalue is called for each address-taken expression or LHS of assignment. + // Supported forms are: x, (x), x[i], x.f, *x, T{}. + var lvalue func(e ast.Expr, escapes bool) + lvalue = func(e ast.Expr, escapes bool) { + switch e := e.(type) { + case *ast.Ident: + if v, ok := info.Uses[e].(*types.Var); ok { + if !isPkgLevel(v) { + f(v, escapes) + } + } + case *ast.ParenExpr: + lvalue(e.X, escapes) + case *ast.IndexExpr: + // TODO(adonovan): support generics without assuming e.X has a core type. + // Consider: + // + // func Index[T interface{ [3]int | []int }](t T, i int) *int { + // return &t[i] + // } + // + // We must traverse the normal terms and check + // whether any of them is an array. + // + // We assume TypeOf returns non-nil. + if _, ok := info.TypeOf(e.X).Underlying().(*types.Array); ok { + lvalue(e.X, escapes) // &a[i] on array + } + case *ast.SelectorExpr: + // We assume TypeOf returns non-nil. 
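+			//
+			// Illustration: in
+			//
+			//	var s S // S a struct type
+			//	p := &s.f
+			//
+			// taking &s.f also takes the address of s, so s
+			// escapes; but &ptr.f (ptr a *S) goes through an
+			// existing pointer and does not make ptr escape.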
+ if _, ok := info.TypeOf(e.X).Underlying().(*types.Struct); ok { + lvalue(e.X, escapes) // &s.f on struct + } + case *ast.StarExpr: + // *ptr indirects an existing pointer + case *ast.CompositeLit: + // &T{...} creates a new variable + default: + panic(fmt.Sprintf("&x on %T", e)) // unreachable in well-typed code + } + } + + // Search function body for operations &x, x.f(), x++, and x = y + // where x is a parameter. Each of these treats x as an address. + ast.Inspect(root, func(n ast.Node) bool { + switch n := n.(type) { + case *ast.UnaryExpr: + if n.Op == token.AND { + lvalue(n.X, true) // &x + } + + case *ast.CallExpr: + // implicit &x in method call x.f(), + // where x has type T and method is (*T).f + if sel, ok := n.Fun.(*ast.SelectorExpr); ok { + if seln, ok := info.Selections[sel]; ok && + seln.Kind() == types.MethodVal && + isPointer(seln.Obj().Type().Underlying().(*types.Signature).Recv().Type()) { + tArg, indirect := effectiveReceiver(seln) + if !indirect && !isPointer(tArg) { + lvalue(sel.X, true) // &x.f + } + } + } + + case *ast.AssignStmt: + for _, lhs := range n.Lhs { + if id, ok := lhs.(*ast.Ident); ok && + info.Defs[id] != nil && + n.Tok == token.DEFINE { + // declaration: doesn't count + } else { + lvalue(lhs, false) + } + } + + case *ast.IncDecStmt: + lvalue(n.X, false) + } + return true + }) +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/falcon.go b/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/falcon.go new file mode 100644 index 00000000000..037d33bd5a2 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/falcon.go @@ -0,0 +1,879 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inline + +// This file defines the callee side of the "fallible constant" analysis. + +import ( + "fmt" + "go/ast" + "go/constant" + "go/format" + "go/token" + "go/types" + "strconv" + "strings" + + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/typeparams" +) + +// falconResult is the result of the analysis of the callee. +type falconResult struct { + Types []falconType // types for falcon constraint environment + Constraints []string // constraints (Go expressions) on values of fallible constants +} + +// A falconType specifies the name and underlying type of a synthetic +// defined type for use in falcon constraints. +// +// Unique types from callee code are bijectively mapped onto falcon +// types so that constraints are independent of callee type +// information but preserve type equivalence classes. +// +// Fresh names are deliberately obscure to avoid shadowing even if a +// callee parameter has a name like "int" or "any". +type falconType struct { + Name string + Kind types.BasicKind // string/number/bool +} + +// falcon identifies "fallible constant" expressions, which are +// expressions that may fail to compile if one or more of their +// operands is changed from non-constant to constant. +// +// Consider: +// +// func sub(s string, i, j int) string { return s[i:j] } +// +// If parameters are replaced by constants, the compiler is +// required to perform these additional checks: +// +// - if i is constant, 0 <= i. +// - if s and i are constant, i <= len(s). +// - ditto for j. +// - if i and j are constant, i <= j. +// +// s[i:j] is thus a "fallible constant" expression dependent on {s, i, +// j}. 
Each such fallible-constant expression gives rise to a set of
+// conditional constraints across one or more parameter variables.
+//
+//   - When inlining a call such as sub("abc", -1, 2), the parameter i
+//     cannot be eliminated by substitution as its argument value is
+//     negative.
+//
+//   - When inlining sub("", 2, 1), all three parameters cannot be
+//     simultaneously eliminated by substitution without violating i
+//     <= len(s) and j <= len(s), but the parameters i and j could be
+//     safely eliminated without s.
+//
+// Parameters that cannot be eliminated must remain non-constant,
+// either in the form of a binding declaration:
+//
+//	{ var i int = -1; return "abc"[i:2] }
+//
+// or a parameter of a literalization:
+//
+//	func(i int) string { return "abc"[i:2] }(-1)
+//
+// These example expressions are obviously doomed to fail at run
+// time, but in realistic cases such expressions are dominated by
+// appropriate conditions that make them reachable only when safe:
+//
+//	if 0 <= i && i <= j && j <= len(s) { _ = s[i:j] }
+//
+// (In principle a more sophisticated inliner could entirely eliminate
+// such unreachable blocks based on the condition being always-false
+// for the given parameter substitution, but this is tricky to do safely
+// because the type-checker considers only a single configuration.
+// Consider: if runtime.GOOS == "linux" { ... }.)
+//
+// We believe this is an exhaustive list of "fallible constant" operations:
+//
+//   - switch z { case x: case y } // duplicate case values
+//   - s[i], s[i:j], s[i:j:k] // index out of bounds (0 <= i <= j <= k <= len(s))
+//   - T{x: 0} // index out of bounds, duplicate index
+//   - x/y, x%y, x/=y, x%=y // integer division by zero; minint/-1 overflow
+//   - x+y, x-y, x*y // arithmetic overflow
+//   - x<<y, x>>y // constant shift too large
+//   - -x // negation of minint
+//   - T(x) // conversion out of range
+
+// emitUnique emits the constraint typ{elem1: 0, ..., elemN: 0},
+// which requires that the values of all maybe-constant elements
+// be distinct.
+func (st *falconState) emitUnique(typ ast.Expr, elems []ast.Expr) {
+	if len(elems) > 1 {
+		var elts []ast.Expr
+		for _, elem := range elems {
+			elts = append(elts, &ast.KeyValueExpr{
+				Key:   elem,
+				Value: makeIntLit(0),
+			})
+		}
+		st.emit(&ast.CompositeLit{
+			Type: typ,
+			Elts: elts,
+		})
+	}
+}
+
+// -- traversal --
+
+// The traversal functions scan the callee body for expressions that
+// are not constant but would become constant if the parameter vars
+// were redeclared as constants, and emit for each one a constraint
+// (a Go expression) with the property that it will not type-check
+// (using types.CheckExpr) if the particular argument values are
+// unsuitable.
+//
+// These constraints are checked by Inline with the actual
+// constant argument values. Violations cause it to reject
+// parameters as candidates for substitution.
+
+func (st *falconState) stmt(s ast.Stmt) {
+	ast.Inspect(s, func(n ast.Node) bool {
+		switch n := n.(type) {
+		case ast.Expr:
+			_ = st.expr(n)
+			return false // skip usual traversal
+
+		case *ast.AssignStmt:
+			switch n.Tok {
+			case token.QUO_ASSIGN, token.REM_ASSIGN:
+				// x /= y
+				// Possible "integer division by zero"
+				// Emit constraint: 1/y.
+				_ = st.expr(n.Lhs[0])
+				kY := st.expr(n.Rhs[0])
+				if kY, ok := kY.(ast.Expr); ok {
+					op := token.QUO
+					if n.Tok == token.REM_ASSIGN {
+						op = token.REM
+					}
+					st.emit(&ast.BinaryExpr{
+						Op: op,
+						X:  makeIntLit(1),
+						Y:  kY,
+					})
+				}
+				return false // skip usual traversal
+			}
+
+		case *ast.SwitchStmt:
+			if n.Init != nil {
+				st.stmt(n.Init)
+			}
+			tBool := types.Type(types.Typ[types.Bool])
+			tagType := tBool // default: true
+			if n.Tag != nil {
+				st.expr(n.Tag)
+				tagType = st.info.TypeOf(n.Tag)
+			}
+
+			// Possible "duplicate case value".
+ // Emit constraint map[T]int{v1: 0, ..., vN:0} + // to ensure all maybe-constant case values are unique + // (unless switch tag is boolean, which is relaxed). + var unique []ast.Expr + for _, clause := range n.Body.List { + clause := clause.(*ast.CaseClause) + for _, caseval := range clause.List { + if k := st.expr(caseval); k != nil { + unique = append(unique, st.toExpr(k)) + } + } + for _, stmt := range clause.Body { + st.stmt(stmt) + } + } + if unique != nil && !types.Identical(tagType.Underlying(), tBool) { + tname := st.any + if !types.IsInterface(tagType) { + tname = st.typename(tagType) + } + t := &ast.MapType{ + Key: makeIdent(tname), + Value: makeIdent(st.int), + } + st.emitUnique(t, unique) + } + } + return true + }) +} + +// fieldTypes visits the .Type of each field in the list. +func (st *falconState) fieldTypes(fields *ast.FieldList) { + if fields != nil { + for _, field := range fields.List { + _ = st.expr(field.Type) + } + } +} + +// expr visits the expression (or type) and returns a +// non-nil result if the expression is constant or would +// become constant if all suitable function parameters were +// redeclared as constants. +// +// If the expression is constant, st.expr returns its type +// and value (types.TypeAndValue). If the expression would +// become constant, st.expr returns an ast.Expr tree whose +// leaves are literals and parameter references, and whose +// interior nodes are operations that may become constant, +// such as -x, x+y, f(x), and T(x). We call these would-be +// constant expressions "fallible constants", since they may +// fail to type-check for some values of x, i, and j. (We +// refer to the non-nil cases collectively as "maybe +// constant", and the nil case as "definitely non-constant".) +// +// As a side effect, st.expr emits constraints for each +// fallible constant expression; this is its main purpose. +// +// Consequently, st.expr must visit the entire subtree so +// that all necessary constraints are emitted. It may not +// short-circuit the traversal when it encounters a constant +// subexpression as constants may contain arbitrary other +// syntax that may impose constraints. Consider (as always) +// this contrived but legal example of a type parameter (!) +// that contains statement syntax: +// +// func f[T [unsafe.Sizeof(func() { stmts })]int]() +// +// There is no need to emit constraints for (e.g.) s[i] when s +// and i are already constants, because we know the expression +// is sound, but it is sometimes easier to emit these +// redundant constraints than to avoid them. +func (st *falconState) expr(e ast.Expr) (res any) { // = types.TypeAndValue | ast.Expr + tv := st.info.Types[e] + if tv.Value != nil { + // A constant value overrides any other result. + defer func() { res = tv }() + } + + switch e := e.(type) { + case *ast.Ident: + if v, ok := st.info.Uses[e].(*types.Var); ok { + if _, ok := st.params[v]; ok && isBasic(v.Type(), types.IsConstType) { + return e // reference to constable parameter + } + } + // (References to *types.Const are handled by the defer.) + + case *ast.BasicLit: + // constant + + case *ast.ParenExpr: + return st.expr(e.X) + + case *ast.FuncLit: + _ = st.expr(e.Type) + st.stmt(e.Body) + // definitely non-constant + + case *ast.CompositeLit: + // T{k: v, ...}, where T ∈ {array,*array,slice,map}, + // imposes a constraint that all constant k are + // distinct and, for arrays [n]T, within range 0-n. + // + // Types matter, not just values. 
For example, + // an interface-keyed map may contain keys + // that are numerically equal so long as they + // are of distinct types. For example: + // + // type myint int + // map[any]bool{1: true, 1: true} // error: duplicate key + // map[any]bool{1: true, int16(1): true} // ok + // map[any]bool{1: true, myint(1): true} // ok + // + // This can be asserted by emitting a + // constraint of the form T{k1: 0, ..., kN: 0}. + if e.Type != nil { + _ = st.expr(e.Type) + } + t := types.Unalias(typeparams.Deref(tv.Type)) + ct := typeparams.CoreType(t) + var mapKeys []ast.Expr // map key expressions; must be distinct if constant + for _, elt := range e.Elts { + if kv, ok := elt.(*ast.KeyValueExpr); ok { + if is[*types.Map](ct) { + if k := st.expr(kv.Key); k != nil { + mapKeys = append(mapKeys, st.toExpr(k)) + } + } + _ = st.expr(kv.Value) + } else { + _ = st.expr(elt) + } + } + if len(mapKeys) > 0 { + // Inlining a map literal may replace variable key expressions by constants. + // All such constants must have distinct values. + // (Array and slice literals do not permit non-constant keys.) + t := ct.(*types.Map) + var typ ast.Expr + if types.IsInterface(t.Key()) { + typ = &ast.MapType{ + Key: makeIdent(st.any), + Value: makeIdent(st.int), + } + } else { + typ = &ast.MapType{ + Key: makeIdent(st.typename(t.Key())), + Value: makeIdent(st.int), + } + } + st.emitUnique(typ, mapKeys) + } + // definitely non-constant + + case *ast.SelectorExpr: + _ = st.expr(e.X) + _ = st.expr(e.Sel) + // The defer is sufficient to handle + // qualified identifiers (pkg.Const). + // All other cases are definitely non-constant. + + case *ast.IndexExpr: + if tv.IsType() { + // type C[T] + _ = st.expr(e.X) + _ = st.expr(e.Index) + } else { + // term x[i] + // + // Constraints (if x is slice/string/array/*array, not map): + // - i >= 0 + // if i is a fallible constant + // - i < len(x) + // if x is array/*array and + // i is a fallible constant; + // or if s is a string and both i, + // s are maybe-constants, + // but not both are constants. + kX := st.expr(e.X) + kI := st.expr(e.Index) + if kI != nil && !is[*types.Map](st.info.TypeOf(e.X).Underlying()) { + if kI, ok := kI.(ast.Expr); ok { + st.emitNonNegative(kI) + } + // Emit constraint to check indices against known length. + // TODO(adonovan): factor with SliceExpr logic. + var x ast.Expr + if kX != nil { + // string + x = st.toExpr(kX) + } else if arr, ok := typeparams.CoreType(typeparams.Deref(st.info.TypeOf(e.X))).(*types.Array); ok { + // array, *array + x = &ast.CompositeLit{ + Type: &ast.ArrayType{ + Len: makeIntLit(arr.Len()), + Elt: makeIdent(st.int), + }, + } + } + if x != nil { + st.emit(&ast.IndexExpr{ + X: x, + Index: st.toExpr(kI), + }) + } + } + } + // definitely non-constant + + case *ast.SliceExpr: + // x[low:high:max] + // + // Emit non-negative constraints for each index, + // plus low <= high <= max <= len(x) + // for each pair that are maybe-constant + // but not definitely constant. 
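+		//
+		// Illustration: for s[i:j] with maybe-constant i and j,
+		// the emitted constraints are 0 <= i, 0 <= j, and i <= j,
+		// plus, when s is a constant string or an array, a slice
+		// expression that checks the indices against len(s).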
+ + kX := st.expr(e.X) + var kLow, kHigh, kMax any + if e.Low != nil { + kLow = st.expr(e.Low) + if kLow != nil { + if kLow, ok := kLow.(ast.Expr); ok { + st.emitNonNegative(kLow) + } + } + } + if e.High != nil { + kHigh = st.expr(e.High) + if kHigh != nil { + if kHigh, ok := kHigh.(ast.Expr); ok { + st.emitNonNegative(kHigh) + } + if kLow != nil { + st.emitMonotonic(st.toExpr(kLow), st.toExpr(kHigh)) + } + } + } + if e.Max != nil { + kMax = st.expr(e.Max) + if kMax != nil { + if kMax, ok := kMax.(ast.Expr); ok { + st.emitNonNegative(kMax) + } + if kHigh != nil { + st.emitMonotonic(st.toExpr(kHigh), st.toExpr(kMax)) + } + } + } + + // Emit constraint to check indices against known length. + var x ast.Expr + if kX != nil { + // string + x = st.toExpr(kX) + } else if arr, ok := typeparams.CoreType(typeparams.Deref(st.info.TypeOf(e.X))).(*types.Array); ok { + // array, *array + x = &ast.CompositeLit{ + Type: &ast.ArrayType{ + Len: makeIntLit(arr.Len()), + Elt: makeIdent(st.int), + }, + } + } + if x != nil { + // Avoid slice[::max] if kHigh is nonconstant (nil). + high, max := st.toExpr(kHigh), st.toExpr(kMax) + if high == nil { + high = max // => slice[:max:max] + } + st.emit(&ast.SliceExpr{ + X: x, + Low: st.toExpr(kLow), + High: high, + Max: max, + }) + } + // definitely non-constant + + case *ast.TypeAssertExpr: + _ = st.expr(e.X) + if e.Type != nil { + _ = st.expr(e.Type) + } + + case *ast.CallExpr: + _ = st.expr(e.Fun) + if tv, ok := st.info.Types[e.Fun]; ok && tv.IsType() { + // conversion T(x) + // + // Possible "value out of range". + kX := st.expr(e.Args[0]) + if kX != nil && isBasic(tv.Type, types.IsConstType) { + conv := convert(makeIdent(st.typename(tv.Type)), st.toExpr(kX)) + if is[ast.Expr](kX) { + st.emit(conv) + } + return conv + } + return nil // definitely non-constant + } + + // call f(x) + + all := true // all args are possibly-constant + kArgs := make([]ast.Expr, len(e.Args)) + for i, arg := range e.Args { + if kArg := st.expr(arg); kArg != nil { + kArgs[i] = st.toExpr(kArg) + } else { + all = false + } + } + + // Calls to built-ins with fallibly constant arguments + // may become constant. All other calls are either + // constant or non-constant + if id, ok := e.Fun.(*ast.Ident); ok && all && tv.Value == nil { + if builtin, ok := st.info.Uses[id].(*types.Builtin); ok { + switch builtin.Name() { + case "len", "imag", "real", "complex", "min", "max": + return &ast.CallExpr{ + Fun: id, + Args: kArgs, + Ellipsis: e.Ellipsis, + } + } + } + } + + case *ast.StarExpr: // *T, *ptr + _ = st.expr(e.X) + + case *ast.UnaryExpr: + // + - ! ^ & <- ~ + // + // Possible "negation of minint". + // Emit constraint: -x + kX := st.expr(e.X) + if kX != nil && !is[types.TypeAndValue](kX) { + if e.Op == token.SUB { + st.emit(&ast.UnaryExpr{ + Op: e.Op, + X: st.toExpr(kX), + }) + } + + return &ast.UnaryExpr{ + Op: e.Op, + X: st.toExpr(kX), + } + } + + case *ast.BinaryExpr: + kX := st.expr(e.X) + kY := st.expr(e.Y) + switch e.Op { + case token.QUO, token.REM: + // x/y, x%y + // + // Possible "integer division by zero" or + // "minint / -1" overflow. + // Emit constraint: x/y or 1/y + if kY != nil { + if kX == nil { + kX = makeIntLit(1) + } + st.emit(&ast.BinaryExpr{ + Op: e.Op, + X: st.toExpr(kX), + Y: st.toExpr(kY), + }) + } + + case token.ADD, token.SUB, token.MUL: + // x+y, x-y, x*y + // + // Possible "arithmetic overflow". 
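+			// For example (hypothetical): substituting the constant
+			// argument 127 into
+			//
+			//	func f(x int8) int8 { return x + 1 }
+			//
+			// would yield the constant expression int8(127) + 1,
+			// which overflows int8 and does not compile.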
+			// Emit constraint: x+y
+			if kX != nil && kY != nil {
+				st.emit(&ast.BinaryExpr{
+					Op: e.Op,
+					X:  st.toExpr(kX),
+					Y:  st.toExpr(kY),
+				})
+			}
+
+		case token.SHL, token.SHR:
+			// x << y, x >> y
+			//
+			// Possible "constant shift too large".
+			// Either operand may be too large individually,
+			// and they may be too large together.
+			// Emit constraint:
+			//	x << y (if both maybe-constant)
+			//	x << 0 (if y is non-constant)
+			//	1 << y (if x is non-constant)
+			if kX != nil || kY != nil {
+				x := st.toExpr(kX)
+				if x == nil {
+					x = makeIntLit(1)
+				}
+				y := st.toExpr(kY)
+				if y == nil {
+					y = makeIntLit(0)
+				}
+				st.emit(&ast.BinaryExpr{
+					Op: e.Op,
+					X:  x,
+					Y:  y,
+				})
+			}
+
+		case token.LSS, token.GTR, token.EQL, token.NEQ, token.LEQ, token.GEQ:
+			// < > == != <= >=
+			//
+			// An "x cmp y" expression with constant operands x, y is
+			// itself constant, but I can't see how a constant bool
+			// could be fallible: the compiler doesn't reject duplicate
+			// boolean cases in a switch, presumably because boolean
+			// switches are less like n-way branches and more like
+			// sequential if-else chains with possibly overlapping
+			// conditions; and there is (sadly) no way to convert a
+			// boolean constant to an int constant.
+		}
+		if kX != nil && kY != nil {
+			return &ast.BinaryExpr{
+				Op: e.Op,
+				X:  st.toExpr(kX),
+				Y:  st.toExpr(kY),
+			}
+		}
+
+	// types
+	//
+	// We need to visit types (and even type parameters)
+	// in order to reach all the places where things could go wrong:
+	//
+	//	const (
+	//		s = ""
+	//		i = 0
+	//	)
+	//	type C[T [unsafe.Sizeof(func() { _ = s[i] })]int] bool
+
+	case *ast.IndexListExpr:
+		_ = st.expr(e.X)
+		for _, expr := range e.Indices {
+			_ = st.expr(expr)
+		}
+
+	case *ast.Ellipsis:
+		if e.Elt != nil {
+			_ = st.expr(e.Elt)
+		}
+
+	case *ast.ArrayType:
+		if e.Len != nil {
+			_ = st.expr(e.Len)
+		}
+		_ = st.expr(e.Elt)
+
+	case *ast.StructType:
+		st.fieldTypes(e.Fields)
+
+	case *ast.FuncType:
+		st.fieldTypes(e.TypeParams)
+		st.fieldTypes(e.Params)
+		st.fieldTypes(e.Results)
+
+	case *ast.InterfaceType:
+		st.fieldTypes(e.Methods)
+
+	case *ast.MapType:
+		_ = st.expr(e.Key)
+		_ = st.expr(e.Value)
+
+	case *ast.ChanType:
+		_ = st.expr(e.Value)
+	}
+	return
+}
+
+// toExpr converts the result of st.expr to a falcon expression.
+// (We don't do this within st.expr itself, as we first need to
+// discriminate constants from maybe-constants.)
+func (st *falconState) toExpr(x any) ast.Expr {
+	switch x := x.(type) {
+	case nil:
+		return nil
+
+	case types.TypeAndValue:
+		lit := makeLiteral(x.Value)
+		if !isBasic(x.Type, types.IsUntyped) {
+			// convert to "typed" type
+			lit = &ast.CallExpr{
+				Fun:  makeIdent(st.typename(x.Type)),
+				Args: []ast.Expr{lit},
+			}
+		}
+		return lit
+
+	case ast.Expr:
+		return x
+
+	default:
+		panic(x)
+	}
+}
+
+func makeLiteral(v constant.Value) ast.Expr {
+	switch v.Kind() {
+	case constant.Bool:
+		// Rather than refer to the true or false built-ins,
+		// which could be shadowed by poorly chosen parameter
+		// names, we use 0 == 0 for true and 0 != 0 for false.
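+		// For example, a callee parameter declared as
+		// func f(true bool) legally shadows the predeclared
+		// identifier, so rendering the constant as 0 == 0 is
+		// robust where an identifier reference would not be.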
+ op := token.EQL + if !constant.BoolVal(v) { + op = token.NEQ + } + return &ast.BinaryExpr{ + Op: op, + X: makeIntLit(0), + Y: makeIntLit(0), + } + + case constant.String: + return &ast.BasicLit{ + Kind: token.STRING, + Value: v.ExactString(), + } + + case constant.Int: + return &ast.BasicLit{ + Kind: token.INT, + Value: v.ExactString(), + } + + case constant.Float: + return &ast.BasicLit{ + Kind: token.FLOAT, + Value: v.ExactString(), + } + + case constant.Complex: + // The components could be float or int. + y := makeLiteral(constant.Imag(v)) + y.(*ast.BasicLit).Value += "i" // ugh + if re := constant.Real(v); !consteq(re, kZeroInt) { + // complex: x + yi + y = &ast.BinaryExpr{ + Op: token.ADD, + X: makeLiteral(re), + Y: y, + } + } + return y + + default: + panic(v.Kind()) + } +} + +func makeIntLit(x int64) *ast.BasicLit { + return &ast.BasicLit{ + Kind: token.INT, + Value: strconv.FormatInt(x, 10), + } +} + +func isBasic(t types.Type, info types.BasicInfo) bool { + basic, ok := t.Underlying().(*types.Basic) + return ok && basic.Info()&info != 0 +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/free.go b/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/free.go new file mode 100644 index 00000000000..e3cf313a8a8 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/free.go @@ -0,0 +1,382 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copied, with considerable changes, from go/parser/resolver.go +// at af53bd2c03. + +package inline + +import ( + "go/ast" + "go/token" +) + +// freeishNames computes an approximation to the free names of the AST +// at node n based solely on syntax, inserting values into the map. +// +// In the absence of composite literals, the set of free names is exact. Composite +// literals introduce an ambiguity that can only be resolved with type information: +// whether F is a field name or a value in `T{F: ...}`. +// If includeComplitIdents is true, this function conservatively assumes +// T is not a struct type, so freeishNames overapproximates: the resulting +// set may contain spurious entries that are not free lexical references +// but are references to struct fields. +// If includeComplitIdents is false, this function assumes that T *is* +// a struct type, so freeishNames underapproximates: the resulting set +// may omit names that are free lexical references. +// +// The code is based on go/parser.resolveFile, but heavily simplified. Crucial +// differences are: +// - Instead of resolving names to their objects, this function merely records +// whether they are free. +// - Labels are ignored: they do not refer to values. +// - This is never called on FuncDecls or ImportSpecs, so the function +// panics if it sees one. +func freeishNames(free map[string]bool, n ast.Node, includeComplitIdents bool) { + v := &freeVisitor{free: free, includeComplitIdents: includeComplitIdents} + // Begin with a scope, even though n might not be a form that establishes a scope. + // For example, n might be: + // x := ... + // Then we need to add the first x to some scope. + v.openScope() + ast.Walk(v, n) + v.closeScope() + assert(v.scope == nil, "unbalanced scopes") +} + +// A freeVisitor holds state for a free-name analysis. 
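+//
+// For example (a hypothetical illustration), when the visitor walks
+// the statement x := y, it resolves y first (recording it as free if
+// no enclosing scope declares it) and only then declares x, so a
+// later use of x in the same scope is not recorded as free.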
+type freeVisitor struct { + scope *scope // the current innermost scope + free map[string]bool // free names seen so far + includeComplitIdents bool // include identifier key in composite literals +} + +// scope contains all the names defined in a lexical scope. +// It is like ast.Scope, but without deprecation warnings. +type scope struct { + names map[string]bool + outer *scope +} + +func (s *scope) defined(name string) bool { + for ; s != nil; s = s.outer { + if s.names[name] { + return true + } + } + return false +} + +func (v *freeVisitor) Visit(n ast.Node) ast.Visitor { + switch n := n.(type) { + + // Expressions. + case *ast.Ident: + v.resolve(n) + + case *ast.FuncLit: + v.openScope() + defer v.closeScope() + v.walkFuncType(n.Type) + v.walkBody(n.Body) + + case *ast.SelectorExpr: + v.walk(n.X) + // Skip n.Sel: it cannot be free. + + case *ast.StructType: + v.openScope() + defer v.closeScope() + v.walkFieldList(n.Fields) + + case *ast.FuncType: + v.openScope() + defer v.closeScope() + v.walkFuncType(n) + + case *ast.CompositeLit: + v.walk(n.Type) + for _, e := range n.Elts { + if kv, _ := e.(*ast.KeyValueExpr); kv != nil { + if ident, _ := kv.Key.(*ast.Ident); ident != nil { + // It is not possible from syntax alone to know whether + // an identifier used as a composite literal key is + // a struct field (if n.Type is a struct) or a value + // (if n.Type is a map, slice or array). + if v.includeComplitIdents { + // Over-approximate by treating both cases as potentially + // free names. + v.resolve(ident) + } else { + // Under-approximate by ignoring potentially free names. + } + } else { + v.walk(kv.Key) + } + v.walk(kv.Value) + } else { + v.walk(e) + } + } + + case *ast.InterfaceType: + v.openScope() + defer v.closeScope() + v.walkFieldList(n.Methods) + + // Statements + case *ast.AssignStmt: + walkSlice(v, n.Rhs) + if n.Tok == token.DEFINE { + v.shortVarDecl(n.Lhs) + } else { + walkSlice(v, n.Lhs) + } + + case *ast.LabeledStmt: + // ignore labels + // TODO(jba): consider labels? + v.walk(n.Stmt) + + case *ast.BranchStmt: + // Ignore labels. + // TODO(jba): consider labels? + + case *ast.BlockStmt: + v.openScope() + defer v.closeScope() + walkSlice(v, n.List) + + case *ast.IfStmt: + v.openScope() + defer v.closeScope() + v.walk(n.Init) + v.walk(n.Cond) + v.walk(n.Body) + v.walk(n.Else) + + case *ast.CaseClause: + walkSlice(v, n.List) + v.openScope() + defer v.closeScope() + walkSlice(v, n.Body) + + case *ast.SwitchStmt: + v.openScope() + defer v.closeScope() + v.walk(n.Init) + v.walk(n.Tag) + v.walkBody(n.Body) + + case *ast.TypeSwitchStmt: + if n.Init != nil { + v.openScope() + defer v.closeScope() + v.walk(n.Init) + } + v.openScope() + defer v.closeScope() + v.walk(n.Assign) + // We can use walkBody here because we don't track label scopes. 
+ v.walkBody(n.Body) + + case *ast.CommClause: + v.openScope() + defer v.closeScope() + v.walk(n.Comm) + walkSlice(v, n.Body) + + case *ast.SelectStmt: + v.walkBody(n.Body) + + case *ast.ForStmt: + v.openScope() + defer v.closeScope() + v.walk(n.Init) + v.walk(n.Cond) + v.walk(n.Post) + v.walk(n.Body) + + case *ast.RangeStmt: + v.openScope() + defer v.closeScope() + v.walk(n.X) + var lhs []ast.Expr + if n.Key != nil { + lhs = append(lhs, n.Key) + } + if n.Value != nil { + lhs = append(lhs, n.Value) + } + if len(lhs) > 0 { + if n.Tok == token.DEFINE { + v.shortVarDecl(lhs) + } else { + walkSlice(v, lhs) + } + } + v.walk(n.Body) + + // Declarations + case *ast.GenDecl: + switch n.Tok { + case token.CONST, token.VAR: + for _, spec := range n.Specs { + spec := spec.(*ast.ValueSpec) + walkSlice(v, spec.Values) + if spec.Type != nil { + v.walk(spec.Type) + } + v.declare(spec.Names...) + } + case token.TYPE: + for _, spec := range n.Specs { + spec := spec.(*ast.TypeSpec) + // Go spec: The scope of a type identifier declared inside a + // function begins at the identifier in the TypeSpec and ends + // at the end of the innermost containing block. + v.declare(spec.Name) + if spec.TypeParams != nil { + v.openScope() + defer v.closeScope() + v.walkTypeParams(spec.TypeParams) + } + v.walk(spec.Type) + } + + case token.IMPORT: + panic("encountered import declaration in free analysis") + } + + case *ast.FuncDecl: + panic("encountered top-level function declaration in free analysis") + + default: + return v + } + + return nil +} + +func (r *freeVisitor) openScope() { + r.scope = &scope{map[string]bool{}, r.scope} +} + +func (r *freeVisitor) closeScope() { + r.scope = r.scope.outer +} + +func (r *freeVisitor) walk(n ast.Node) { + if n != nil { + ast.Walk(r, n) + } +} + +// walkFuncType walks a function type. It is used for explicit +// function types, like this: +// +// type RunFunc func(context.Context) error +// +// and function literals, like this: +// +// func(a, b int) int { return a + b} +// +// neither of which have type parameters. +// Function declarations do involve type parameters, but we don't +// handle them. +func (r *freeVisitor) walkFuncType(typ *ast.FuncType) { + // The order here doesn't really matter, because names in + // a field list cannot appear in types. + // (The situation is different for type parameters, for which + // see [freeVisitor.walkTypeParams].) + r.resolveFieldList(typ.Params) + r.resolveFieldList(typ.Results) + r.declareFieldList(typ.Params) + r.declareFieldList(typ.Results) +} + +// walkTypeParams is like walkFieldList, but declares type parameters eagerly so +// that they may be resolved in the constraint expressions held in the field +// Type. +func (r *freeVisitor) walkTypeParams(list *ast.FieldList) { + r.declareFieldList(list) + r.resolveFieldList(list) +} + +func (r *freeVisitor) walkBody(body *ast.BlockStmt) { + if body == nil { + return + } + walkSlice(r, body.List) +} + +func (r *freeVisitor) walkFieldList(list *ast.FieldList) { + if list == nil { + return + } + r.resolveFieldList(list) // .Type may contain references + r.declareFieldList(list) // .Names declares names +} + +func (r *freeVisitor) shortVarDecl(lhs []ast.Expr) { + // Go spec: A short variable declaration may redeclare variables provided + // they were originally declared in the same block with the same type, and + // at least one of the non-blank variables is new. + // + // However, it doesn't matter to free analysis whether a variable is declared + // fresh or redeclared. 
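+	// For example, given x, a := f() followed by x, b := g() in the
+	// same block, x is redeclared by the second statement, but both
+	// statements simply (re)add x to the current scope.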
+ for _, x := range lhs { + // In a well-formed program each expr must be an identifier, + // but be forgiving. + if id, ok := x.(*ast.Ident); ok { + r.declare(id) + } + } +} + +func walkSlice[S ~[]E, E ast.Node](r *freeVisitor, list S) { + for _, e := range list { + r.walk(e) + } +} + +// resolveFieldList resolves the types of the fields in list. +// The companion method declareFieldList declares the names of the fields. +func (r *freeVisitor) resolveFieldList(list *ast.FieldList) { + if list == nil { + return + } + for _, f := range list.List { + r.walk(f.Type) + } +} + +// declareFieldList declares the names of the fields in list. +// (Names in a FieldList always establish new bindings.) +// The companion method resolveFieldList resolves the types of the fields. +func (r *freeVisitor) declareFieldList(list *ast.FieldList) { + if list == nil { + return + } + for _, f := range list.List { + r.declare(f.Names...) + } +} + +// resolve marks ident as free if it is not in scope. +// TODO(jba): rename: no resolution is happening. +func (r *freeVisitor) resolve(ident *ast.Ident) { + if s := ident.Name; s != "_" && !r.scope.defined(s) { + r.free[s] = true + } +} + +// declare adds each non-blank ident to the current scope. +func (r *freeVisitor) declare(idents ...*ast.Ident) { + for _, id := range idents { + if id.Name != "_" { + r.scope.names[id.Name] = true + } + } +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/inline.go b/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/inline.go new file mode 100644 index 00000000000..2443504da7a --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/inline.go @@ -0,0 +1,3744 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inline + +import ( + "bytes" + "fmt" + "go/ast" + "go/constant" + "go/format" + "go/parser" + "go/printer" + "go/token" + "go/types" + "maps" + pathpkg "path" + "reflect" + "slices" + "strconv" + "strings" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/go/types/typeutil" + internalastutil "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/packagepath" + "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/typesinternal" +) + +// A Caller describes the function call and its enclosing context. +// +// The client is responsible for populating this struct and passing it to Inline. +type Caller struct { + Fset *token.FileSet + Types *types.Package + Info *types.Info + File *ast.File + Call *ast.CallExpr + Content []byte // source of file containing + + path []ast.Node // path from call to root of file syntax tree + enclosingFunc *ast.FuncDecl // top-level function/method enclosing the call, if any +} + +type logger = func(string, ...any) + +// Options specifies parameters affecting the inliner algorithm. +// All fields are optional. +type Options struct { + Logf logger // log output function, records decision-making process + IgnoreEffects bool // ignore potential side effects of arguments (unsound) +} + +// Result holds the result of code transformation. 
+type Result struct {
+	Content     []byte // formatted, transformed content of caller file
+	Literalized bool   // chosen strategy replaced callee() with func(){...}()
+	BindingDecl bool   // transformation added "var params = args" declaration
+
+	// TODO(adonovan): provide an API for clients that want structured
+	// output: a list of import additions and deletions plus one or more
+	// localized diffs (or even AST transformations, though ownership and
+	// mutation are tricky) near the call site.
+}
+
+// Inline inlines the called function (callee) into the function call (caller)
+// and returns the updated, formatted content of the caller source file.
+//
+// Inline does not mutate any public fields of Caller or Callee.
+func Inline(caller *Caller, callee *Callee, opts *Options) (*Result, error) {
+	copy := *opts // shallow copy
+	opts = &copy
+	// Set default options.
+	if opts.Logf == nil {
+		opts.Logf = func(string, ...any) {}
+	}
+
+	st := &state{
+		caller: caller,
+		callee: callee,
+		opts:   opts,
+	}
+	return st.inline()
+}
+
+// state holds the working state of the inliner.
+type state struct {
+	caller *Caller
+	callee *Callee
+	opts   *Options
+}
+
+func (st *state) inline() (*Result, error) {
+	logf, caller, callee := st.opts.Logf, st.caller, st.callee
+
+	logf("inline %s @ %v",
+		debugFormatNode(caller.Fset, caller.Call),
+		caller.Fset.PositionFor(caller.Call.Lparen, false))
+
+	if !consistentOffsets(caller) {
+		return nil, fmt.Errorf("internal error: caller syntax positions are inconsistent with file content (did you forget to use FileSet.PositionFor when computing the file name?)")
+	}
+
+	// Break the string literal so we can use inlining in this file. :)
+	if ast.IsGenerated(caller.File) &&
+		bytes.Contains(caller.Content, []byte("// Code generated by "+"cmd/cgo; DO NOT EDIT.")) {
+		return nil, fmt.Errorf("cannot inline calls from files that import \"C\"")
+	}
+
+	res, err := st.inlineCall()
+	if err != nil {
+		return nil, err
+	}
+
+	// Replace the call (or some node that encloses it) by new syntax.
+	assert(res.old != nil, "old is nil")
+	assert(res.new != nil, "new is nil")
+
+	// A single return operand inlined to a unary
+	// expression context may need parens. Otherwise:
+	//	func two() int { return 1+1 }
+	//	print(-two()) => print(-1+1) // oops!
+	//
+	// Usually it is not necessary to insert ParenExprs
+	// as the formatter is smart enough to insert them as
+	// needed by the context. But the res.{old,new}
+	// substitution is done by formatting res.new in isolation
+	// and then splicing its text over res.old, so the
+	// formatter doesn't see the parent node and cannot do
+	// the right thing. (One solution would be to always
+	// format the enclosing node of old, but that requires
+	// non-lossy comment handling, #20744.)
+	//
+	// So, we must analyze the call's context
+	// to see whether ambiguity is possible.
+	// For example, if the context is x[y:z], then
+	// the x subtree is subject to precedence ambiguity
+	// (replacing x by p+q would give p+q[y:z] which is wrong)
+	// but the y and z subtrees are safe.
+	if needsParens(caller.path, res.old, res.new) {
+		res.new = &ast.ParenExpr{X: res.new.(ast.Expr)}
+	}
+
+	// Some reduction strategies return a new block holding the
+	// callee's statements. The block's braces may be elided when
+	// there is no conflict between names declared in the block
+	// and those declared by the parent block, and no risk of
+	// a caller's goto jumping forward across a declaration.
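+	//
+	// For example (hypothetical), the braces of an inlined block
+	//
+	//	{ x := g(); use(x) }
+	//
+	// can be elided only if the parent block (including any
+	// enclosing function's parameters and results) does not also
+	// declare x.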
+ // + // This elision is only safe when the ExprStmt is beneath a + // BlockStmt, CaseClause.Body, or CommClause.Body; + // (see "statement theory"). + // + // The inlining analysis may have already determined that eliding braces is + // safe. Otherwise, we analyze its safety here. + elideBraces := res.elideBraces + if !elideBraces { + if newBlock, ok := res.new.(*ast.BlockStmt); ok { + i := slices.Index(caller.path, res.old) + parent := caller.path[i+1] + var body []ast.Stmt + switch parent := parent.(type) { + case *ast.BlockStmt: + body = parent.List + case *ast.CommClause: + body = parent.Body + case *ast.CaseClause: + body = parent.Body + } + if body != nil { + callerNames := declares(body) + + // If BlockStmt is a function body, + // include its receiver, params, and results. + addFieldNames := func(fields *ast.FieldList) { + if fields != nil { + for _, field := range fields.List { + for _, id := range field.Names { + callerNames[id.Name] = true + } + } + } + } + switch f := caller.path[i+2].(type) { + case *ast.FuncDecl: + addFieldNames(f.Recv) + addFieldNames(f.Type.Params) + addFieldNames(f.Type.Results) + case *ast.FuncLit: + addFieldNames(f.Type.Params) + addFieldNames(f.Type.Results) + } + + if len(callerLabels(caller.path)) > 0 { + // TODO(adonovan): be more precise and reject + // only forward gotos across the inlined block. + logf("keeping block braces: caller uses control labels") + } else if intersects(declares(newBlock.List), callerNames) { + logf("keeping block braces: avoids name conflict") + } else { + elideBraces = true + } + } + } + } + + // File rewriting. This proceeds in multiple passes, in order to maximally + // preserve comment positioning. (This could be greatly simplified once + // comments are stored in the tree.) + // + // Don't call replaceNode(caller.File, res.old, res.new) + // as it mutates the caller's syntax tree. + // Instead, splice the file, replacing the extent of the "old" + // node by a formatting of the "new" node, and re-parse. + // We'll fix up the imports on this new tree, and format again. + // + // Inv: f is the result of parsing content, using fset. + var ( + content = caller.Content + fset = caller.Fset + f *ast.File // parsed below + ) + reparse := func() error { + const mode = parser.ParseComments | parser.SkipObjectResolution | parser.AllErrors + f, err = parser.ParseFile(fset, "callee.go", content, mode) + if err != nil { + // Something has gone very wrong. + logf("failed to reparse <<%s>>: %v", string(content), err) // debugging + return err + } + return nil + } + { + start := offsetOf(fset, res.old.Pos()) + end := offsetOf(fset, res.old.End()) + var out bytes.Buffer + out.Write(content[:start]) + // TODO(adonovan): might it make more sense to use + // callee.Fset when formatting res.new? + // The new tree is a mix of (cloned) caller nodes for + // the argument expressions and callee nodes for the + // function body. In essence the question is: which + // is more likely to have comments? + // Usually the callee body will be larger and more + // statement-heavy than the arguments, but a + // strategy may widen the scope of the replacement + // (res.old) from CallExpr to, say, its enclosing + // block, so the caller nodes dominate. + // Precise comment handling would make this a + // non-issue. Formatting wouldn't really need a + // FileSet at all. 
+ if elideBraces { + for i, stmt := range res.new.(*ast.BlockStmt).List { + if i > 0 { + out.WriteByte('\n') + } + if err := format.Node(&out, fset, stmt); err != nil { + return nil, err + } + } + } else { + if err := format.Node(&out, fset, res.new); err != nil { + return nil, err + } + } + out.Write(content[end:]) + content = out.Bytes() + if err := reparse(); err != nil { + return nil, err + } + } + + // Add new imports that are still used. + newImports := trimNewImports(res.newImports, res.new) + // Insert new imports after last existing import, + // to avoid migration of pre-import comments. + // The imports will be organized below. + if len(newImports) > 0 { + // If we have imports to add, do so independent of the rest of the file. + // Otherwise, the length of the new imports may consume floating comments, + // causing them to be printed inside the imports block. + var ( + importDecl *ast.GenDecl + comments []*ast.CommentGroup // relevant comments. + before, after []byte // pre- and post-amble for the imports block. + ) + if len(f.Imports) > 0 { + // Append specs to existing import decl + importDecl = f.Decls[0].(*ast.GenDecl) + for _, comment := range f.Comments { + // Filter comments. Don't use CommentMap.Filter here, because we don't + // want to include comments that document the import decl itself, for + // example: + // + // // We don't want this comment to be duplicated. + // import ( + // "something" + // ) + if importDecl.Pos() <= comment.Pos() && comment.Pos() < importDecl.End() { + comments = append(comments, comment) + } + } + before = content[:offsetOf(fset, importDecl.Pos())] + importDecl.Doc = nil // present in before + after = content[offsetOf(fset, importDecl.End()):] + } else { + // Insert new import decl. + importDecl = &ast.GenDecl{Tok: token.IMPORT} + f.Decls = prepend[ast.Decl](importDecl, f.Decls...) + + // Make room for the new declaration after the package declaration. + pkgEnd := f.Name.End() + file := fset.File(pkgEnd) + if file == nil { + logf("internal error: missing pkg file") + return nil, fmt.Errorf("missing pkg file for %s", f.Name.Name) + } + // Preserve any comments after the package declaration, by splicing in + // the new import block after the end of the package declaration line. + line := file.Line(pkgEnd) + if line < len(file.Lines()) { // line numbers are 1-based + nextLinePos := file.LineStart(line + 1) + nextLine := offsetOf(fset, nextLinePos) + before = slices.Concat(content[:nextLine], []byte("\n")) + after = slices.Concat([]byte("\n\n"), content[nextLine:]) + } else { + before = slices.Concat(content, []byte("\n\n")) + } + } + // Add new imports. + // Set their position to after the last position of the old imports, to keep + // comments on the old imports from moving. + lastPos := token.NoPos + if lastSpec := last(importDecl.Specs); lastSpec != nil { + lastPos = lastSpec.Pos() + if c := lastSpec.(*ast.ImportSpec).Comment; c != nil { + lastPos = c.Pos() + } + } + for _, imp := range newImports { + // Check that the new imports are accessible. 
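+			// For example, a callee that refers to a package under
+			// an internal/ directory cannot be inlined into a caller
+			// outside the subtree permitted to import it.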
+ path, _ := strconv.Unquote(imp.spec.Path.Value) + if !packagepath.CanImport(caller.Types.Path(), path) { + return nil, fmt.Errorf("can't inline function %v as its body refers to inaccessible package %q", callee, path) + } + if lastPos.IsValid() { + lastPos++ + imp.spec.Path.ValuePos = lastPos + } + importDecl.Specs = append(importDecl.Specs, imp.spec) + } + + var out bytes.Buffer + out.Write(before) + commented := &printer.CommentedNode{ + Node: importDecl, + Comments: comments, + } + + if err := format.Node(&out, fset, commented); err != nil { + logf("failed to format new importDecl: %v", err) // debugging + return nil, err + } + out.Write(after) + content = out.Bytes() + if err := reparse(); err != nil { + return nil, err + } + } + // Delete imports referenced only by caller.Call.Fun. + for _, oldImport := range res.oldImports { + specToDelete := oldImport.spec + name := "" + if specToDelete.Name != nil { + name = specToDelete.Name.Name + } + path, _ := strconv.Unquote(specToDelete.Path.Value) + astutil.DeleteNamedImport(caller.Fset, f, name, path) + } + + var out bytes.Buffer + if err := format.Node(&out, caller.Fset, f); err != nil { + return nil, err + } + newSrc := out.Bytes() + + literalized := false + if call, ok := res.new.(*ast.CallExpr); ok && is[*ast.FuncLit](call.Fun) { + literalized = true + } + + return &Result{ + Content: newSrc, + Literalized: literalized, + BindingDecl: res.bindingDecl, + }, nil +} + +// An oldImport is an import that will be deleted from the caller file. +type oldImport struct { + pkgName *types.PkgName + spec *ast.ImportSpec +} + +// A newImport is an import that will be added to the caller file. +type newImport struct { + pkgName string + spec *ast.ImportSpec +} + +// importState tracks information about imports. +type importState struct { + logf func(string, ...any) + caller *Caller + importMap map[string][]string // from package paths in the caller's file to local names + newImports []newImport // for references to free names in callee; to be added to the file + oldImports []oldImport // referenced only by caller.Call.Fun; to be removed from the file +} + +// newImportState returns an importState with initial information about the caller's imports. +func newImportState(logf func(string, ...any), caller *Caller, callee *gobCallee) *importState { + // For simplicity we ignore existing dot imports, so that a qualified + // identifier (QI) in the callee is always represented by a QI in the caller, + // allowing us to treat a QI like a selection on a package name. + is := &importState{ + logf: logf, + caller: caller, + importMap: make(map[string][]string), + } + + for _, imp := range caller.File.Imports { + if pkgName, ok := importedPkgName(caller.Info, imp); ok && + pkgName.Name() != "." && + pkgName.Name() != "_" { + + // If the import's sole use is in caller.Call.Fun of the form p.F(...), + // where p.F is a qualified identifier, the p import may not be + // necessary. + // + // Only the qualified identifier case matters, as other references to + // imported package names in the Call.Fun expression (e.g. + // x.after(3*time.Second).f() or time.Second.String()) will remain after + // inlining, as arguments. + // + // If that is the case, proactively check if any of the callee FreeObjs + // need this import. Doing so eagerly simplifies the resulting logic. 
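+			// For example (hypothetical): if the caller is just
+			//
+			//	time.Sleep(d) // sole use of "time"
+			//
+			// then after inlining Sleep the time import can be
+			// dropped, unless the callee body itself refers to a
+			// free object from package time.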
+			needed := true
+			sel, ok := ast.Unparen(caller.Call.Fun).(*ast.SelectorExpr)
+			if ok && soleUse(caller.Info, pkgName) == sel.X {
+				needed = false // no longer needed by caller
+				// Check to see if any of the inlined free objects need this package.
+				for _, obj := range callee.FreeObjs {
+					if obj.PkgPath == pkgName.Imported().Path() && obj.Shadow[pkgName.Name()] == 0 {
+						needed = true // needed by callee
+						break
+					}
+				}
+			}
+
+			// Exclude imports not needed by the caller or callee after
+			// inlining; these are recorded in oldImports.
+			if needed {
+				path := pkgName.Imported().Path()
+				is.importMap[path] = append(is.importMap[path], pkgName.Name())
+			} else {
+				is.oldImports = append(is.oldImports, oldImport{pkgName: pkgName, spec: imp})
+			}
+		}
+	}
+	return is
+}
+
+// importName finds an existing import name to use in a particular shadowing
+// context. It is used to determine the set of new imports in
+// localName, and is also used for writing out names in inlining
+// strategies below.
+func (i *importState) importName(pkgPath string, shadow shadowMap) string {
+	for _, name := range i.importMap[pkgPath] {
+		// Check that either the import preexisted, or that it was newly added
+		// (no PkgName) but is not shadowed, either in the callee (shadows) or
+		// caller (caller.lookup).
+		if shadow[name] == 0 {
+			found := i.caller.lookup(name)
+			if is[*types.PkgName](found) || found == nil {
+				return name
+			}
+		}
+	}
+	return ""
+}
+
+// localName returns the local name for a given imported package path,
+// adding one if it doesn't exist.
+func (i *importState) localName(pkgPath, pkgName string, shadow shadowMap) string {
+	// Does an import already exist that works in this shadowing context?
+	if name := i.importName(pkgPath, shadow); name != "" {
+		return name
+	}
+
+	newlyAdded := func(name string) bool {
+		return slices.ContainsFunc(i.newImports, func(n newImport) bool { return n.pkgName == name })
+	}
+
+	// shadowedInCaller reports whether a candidate package name
+	// already refers to a declaration in the caller.
+	shadowedInCaller := func(name string) bool {
+		obj := i.caller.lookup(name)
+		if obj == nil {
+			return false
+		}
+		// If obj will be removed, the name is available.
+		return !slices.ContainsFunc(i.oldImports, func(o oldImport) bool { return o.pkgName == obj })
+	}
+
+	// import added by callee
+	//
+	// Choose local PkgName based on last segment of
+	// package path plus, if needed, a numeric suffix to
+	// ensure uniqueness.
+	//
+	// "init" is not a legal PkgName.
+	//
+	// TODO(rfindley): is it worth preserving local package names for callee
+	// imports? Are they likely to be better or worse than the name we choose
+	// here?
+	base := pkgName
+	name := base
+	for n := 0; shadow[name] != 0 || shadowedInCaller(name) || newlyAdded(name) || name == "init"; n++ {
+		name = fmt.Sprintf("%s%d", base, n)
+	}
+	i.logf("adding import %s %q", name, pkgPath)
+	spec := &ast.ImportSpec{
+		Path: &ast.BasicLit{
+			Kind:  token.STRING,
+			Value: strconv.Quote(pkgPath),
+		},
+	}
+	// Use explicit pkgname (out of necessity) when it differs from the declared name,
+	// or (for good style) when it differs from base(pkgpath).
+	if name != pkgName || name != pathpkg.Base(pkgPath) {
+		spec.Name = makeIdent(name)
+	}
+	i.newImports = append(i.newImports, newImport{
+		pkgName: name,
+		spec:    spec,
+	})
+	i.importMap[pkgPath] = append(i.importMap[pkgPath], name)
+	return name
+}
+
+// trimNewImports removes imports that are no longer needed.
+// +// The list of new imports as constructed by calls to [importState.localName] +// includes all of the packages referenced by the callee. +// But in the process of inlining, we may have dropped some of those references. +// For example, if the callee looked like this: +// +// func F(x int) (p.T) {... /* no mention of p */ ...} +// +// and we inlined by assignment: +// +// v := ... +// +// then the reference to package p drops away. +// +// Remove the excess imports by seeing which remain in new, the expression +// to be inlined. +// We can find those by looking at the free names in new. +// The list of free names cannot include spurious package names. +// Free-name tracking is precise except for the case of an identifier +// key in a composite literal, which names either a field or a value. +// Neither fields nor values are package names. +// Since they are not relevant to removing unused imports, we instruct +// freeishNames to omit composite-literal keys that are identifiers. +func trimNewImports(newImports []newImport, new ast.Node) []newImport { + free := map[string]bool{} + const omitComplitIdents = false + freeishNames(free, new, omitComplitIdents) + var res []newImport + for _, ni := range newImports { + if free[ni.pkgName] { + res = append(res, ni) + } + } + return res +} + +type inlineCallResult struct { + newImports []newImport // to add + oldImports []oldImport // to remove + + // If elideBraces is set, old is an ast.Stmt and new is an ast.BlockStmt to + // be spliced in. This allows the inlining analysis to assert that inlining + // the block is OK; if elideBraces is unset and old is an ast.Stmt and new is + // an ast.BlockStmt, braces may still be elided if the post-processing + // analysis determines that it is safe to do so. + // + // Ideally, it would not be necessary for the inlining analysis to "reach + // through" to the post-processing pass in this way. Instead, inlining could + // just set old to be an ast.BlockStmt and rewrite the entire BlockStmt, but + // unfortunately in order to preserve comments, it is important that inlining + // replace as little syntax as possible. + elideBraces bool + bindingDecl bool // transformation inserted "var params = args" declaration + old, new ast.Node // e.g. replace call expr by callee function body expression +} + +// inlineCall returns a pair of an old node (the call, or something +// enclosing it) and a new node (its replacement, which may be a +// combination of caller, callee, and new nodes), along with the set +// of new imports needed. +// +// TODO(adonovan): rethink the 'result' interface. The assumption of a +// one-to-one replacement seems fragile. One can easily imagine the +// transformation replacing the call and adding new variable +// declarations, for example, or replacing a call statement by zero or +// many statements.) +// NOTE(rfindley): we've sort-of done this, with the 'elideBraces' flag that +// allows inlining a statement list. However, due to loss of comments, more +// sophisticated rewrites are challenging. +// +// TODO(adonovan): in earlier drafts, the transformation was expressed +// by splicing substrings of the two source files because syntax +// trees don't preserve comments faithfully (see #20744), but such +// transformations don't compose. The current implementation is +// tree-based but is very lossy wrt comments. 
+// It would make a good
+// candidate for evaluating an alternative fully self-contained tree
+// representation, such as any proposed solution to #20744, or even
+// dst or some private fork of go/ast.
+// TODO(rfindley): see if we can reduce the amount of comment lossiness by
+// using printer.CommentedNode, which has been useful elsewhere.
+//
+// TODO(rfindley): inlineCall is getting very long, and very stateful, making
+// it very hard to read. The following refactoring may improve readability and
+// maintainability:
+//   - Rename 'state' to 'callsite', since that is what it encapsulates.
+//   - Add results of pre-processing analysis into the callsite struct, such as
+//     the effective importMap, new/old imports, arguments, etc. Essentially
+//     anything that resulted from initial analysis of the call site, and which
+//     may be useful to inlining strategies.
+//   - Delegate this call site analysis to a constructor or initializer, such
+//     as 'analyzeCallsite', so that it does not consume bandwidth in the
+//     'inlineCall' logical flow.
+//   - Once analyzeCallsite returns, the callsite is immutable, much in the
+//     same way as the Callee and Caller are immutable.
+//   - Decide on a standard interface for strategies (and substrategies), such
+//     that they may be delegated to a separate method on callsite.
+//
+// In this way, the logical flow of inlineCall will clearly follow this
+// structure:
+//  1. Analyze the call site.
+//  2. Try strategies, in order, until one succeeds.
+//  3. Process the results.
+//
+// If any expensive analysis may be avoided by earlier strategies, it can be
+// encapsulated in its own type and passed to subsequent strategies.
+func (st *state) inlineCall() (*inlineCallResult, error) {
+	logf, caller, callee := st.opts.Logf, st.caller, &st.callee.impl
+
+	checkInfoFields(caller.Info)
+
+	// Inlining of dynamic calls is not currently supported,
+	// even for local closure calls. (This would be a lot of work.)
+	calleeSymbol := typeutil.StaticCallee(caller.Info, caller.Call)
+	if calleeSymbol == nil {
+		// e.g. interface method
+		return nil, fmt.Errorf("cannot inline: not a static function call")
+	}
+
+	// Reject cross-package inlining if callee has
+	// free references to unexported symbols.
+	samePkg := caller.Types.Path() == callee.PkgPath
+	if !samePkg && len(callee.Unexported) > 0 {
+		return nil, fmt.Errorf("cannot inline call to %s because body refers to non-exported %s",
+			callee.Name, callee.Unexported[0])
+	}
+
+	// -- analyze callee's free references in caller context --
+
+	// Compute syntax path enclosing Call, innermost first (Path[0]=Call),
+	// and outermost enclosing function, if any.
+	caller.path, _ = astutil.PathEnclosingInterval(caller.File, caller.Call.Pos(), caller.Call.End())
+	for _, n := range caller.path {
+		if decl, ok := n.(*ast.FuncDecl); ok {
+			caller.enclosingFunc = decl
+			break
+		}
+	}
+
+	// If call is within a function, analyze all its
+	// local vars for the "single assignment" property.
+	// (Taking the address &v counts as a potential assignment.)
+	var assign1 func(v *types.Var) bool // reports whether v is a single-assignment local var
+	{
+		updatedLocals := make(map[*types.Var]bool)
+		if caller.enclosingFunc != nil {
+			escape(caller.Info, caller.enclosingFunc, func(v *types.Var, _ bool) {
+				updatedLocals[v] = true
+			})
+			logf("multiple-assignment vars: %v", updatedLocals)
+		}
+		assign1 = func(v *types.Var) bool { return !updatedLocals[v] }
+	}
+
+	// Extract information about the caller's imports.
+	istate := newImportState(logf, caller, callee)
+
+	// Compute the renaming of the callee's free identifiers.
+	objRenames, err := st.renameFreeObjs(istate)
+	if err != nil {
+		return nil, err
+	}
+
+	res := &inlineCallResult{
+		newImports: istate.newImports,
+		oldImports: istate.oldImports,
+	}
+
+	// Parse callee function declaration.
+	calleeFset, calleeDecl, err := parseCompact(callee.Content)
+	if err != nil {
+		return nil, err // "can't happen"
+	}
+
+	// replaceCalleeID replaces an identifier in the callee. See [replacer] for
+	// more detailed semantics.
+	replaceCalleeID := func(offset int, repl ast.Expr, unpackVariadic bool) {
+		path, id := findIdent(calleeDecl, calleeDecl.Pos()+token.Pos(offset))
+		logf("- replace id %q @ #%d to %q", id.Name, offset, debugFormatNode(calleeFset, repl))
+		// Replace f([]T{a, b, c}...) with f(a, b, c).
+		if lit, ok := repl.(*ast.CompositeLit); ok && unpackVariadic && len(path) > 0 {
+			if call, ok := last(path).(*ast.CallExpr); ok &&
+				call.Ellipsis.IsValid() &&
+				id == last(call.Args) {
+
+				call.Args = append(call.Args[:len(call.Args)-1], lit.Elts...)
+				call.Ellipsis = token.NoPos
+				return
+			}
+		}
+		replaceNode(calleeDecl, id, repl)
+	}
+
+	// Generate replacements for each free identifier.
+	// (The same tree may be spliced in multiple times, resulting in a DAG.)
+	for _, ref := range callee.FreeRefs {
+		if repl := objRenames[ref.Object]; repl != nil {
+			replaceCalleeID(ref.Offset, repl, false)
+		}
+	}
+
+	// Gather the effective call arguments, including the receiver.
+	// Later, elements will be eliminated (=> nil) by parameter substitution.
+	args, err := st.arguments(caller, calleeDecl, assign1)
+	if err != nil {
+		return nil, err // e.g. implicit field selection cannot be made explicit
+	}
+
+	// Gather effective parameter tuple, including the receiver if any.
+	// Simplify variadic parameters to slices (in all cases but one).
+	var params []*parameter // including receiver; nil => parameter substituted
+	{
+		sig := calleeSymbol.Type().(*types.Signature)
+		if sig.Recv() != nil {
+			params = append(params, &parameter{
+				obj:       sig.Recv(),
+				fieldType: calleeDecl.Recv.List[0].Type,
+				info:      callee.Params[0],
+			})
+		}
+
+		// Flatten the list of syntactic types.
+		var types []ast.Expr
+		for _, field := range calleeDecl.Type.Params.List {
+			if field.Names == nil {
+				types = append(types, field.Type)
+			} else {
+				for range field.Names {
+					types = append(types, field.Type)
+				}
+			}
+		}
+
+		for i := 0; i < sig.Params().Len(); i++ {
+			params = append(params, &parameter{
+				obj:       sig.Params().At(i),
+				fieldType: types[i],
+				info:      callee.Params[len(params)],
+			})
+		}
+
+		// Variadic function?
+		//
+		// There are three possible types of call:
+		//   - ordinary f(a1, ..., aN)
+		//   - ellipsis f(a1, ..., slice...)
+		//   - spread   f(recv?, g()) where g() is a tuple.
+		// The first two are desugared to non-variadic calls
+		// with an ordinary slice parameter;
+		// the third is tricky and cannot be reduced, and (if
+		// a receiver is present) cannot even be literalized.
+		// Fortunately it is vanishingly rare.
+		//
+		// TODO(adonovan): extract this to a function.
+		if sig.Variadic() {
+			lastParam := last(params)
+			if len(args) > 0 && last(args).spread {
+				// spread call to variadic: tricky
+				lastParam.variadic = true
+			} else {
+				// ordinary/ellipsis call to variadic
+
+				// simplify decl: func(T...)
-> func([]T) + lastParamField := last(calleeDecl.Type.Params.List) + lastParamField.Type = &ast.ArrayType{ + Elt: lastParamField.Type.(*ast.Ellipsis).Elt, + } + + if caller.Call.Ellipsis.IsValid() { + // ellipsis call: f(slice...) -> f(slice) + // nop + } else { + // ordinary call: f(a1, ... aN) -> f([]T{a1, ..., aN}) + // + // Substitution of []T{...} in the callee body may lead to + // g([]T{a1, ..., aN}...), which we simplify to g(a1, ..., an) + // later; see replaceCalleeID. + n := len(params) - 1 + ordinary, extra := args[:n], args[n:] + var elts []ast.Expr + freevars := make(map[string]bool) + pure, effects := true, false + for _, arg := range extra { + elts = append(elts, arg.expr) + pure = pure && arg.pure + effects = effects || arg.effects + maps.Copy(freevars, arg.freevars) + } + args = append(ordinary, &argument{ + expr: &ast.CompositeLit{ + Type: lastParamField.Type, + Elts: elts, + }, + typ: lastParam.obj.Type(), + constant: nil, + pure: pure, + effects: effects, + duplicable: false, + freevars: freevars, + variadic: true, + }) + } + } + } + } + + typeArgs := st.typeArguments(caller.Call) + if len(typeArgs) != len(callee.TypeParams) { + return nil, fmt.Errorf("cannot inline: type parameter inference is not yet supported") + } + if err := substituteTypeParams(logf, callee.TypeParams, typeArgs, params, replaceCalleeID); err != nil { + return nil, err + } + + // Log effective arguments. + for i, arg := range args { + logf("arg #%d: %s pure=%t effects=%t duplicable=%t free=%v type=%v", + i, debugFormatNode(caller.Fset, arg.expr), + arg.pure, arg.effects, arg.duplicable, arg.freevars, arg.typ) + } + + // Note: computation below should be expressed in terms of + // the args and params slices, not the raw material. + + // Perform parameter substitution. + // May eliminate some elements of params/args. + substitute(logf, caller, params, args, callee.Effects, callee.Falcon, replaceCalleeID) + + // Update the callee's signature syntax. + updateCalleeParams(calleeDecl, params) + + // Create a var (param = arg; ...) decl for use by some strategies. + bindingDecl := createBindingDecl(logf, caller, args, calleeDecl, callee.Results) + + var remainingArgs []ast.Expr + for _, arg := range args { + if arg != nil { + remainingArgs = append(remainingArgs, arg.expr) + } + } + + // -- let the inlining strategies begin -- + // + // When we commit to a strategy, we log a message of the form: + // + // "strategy: reduce expr-context call to { return expr }" + // + // This is a terse way of saying: + // + // we plan to reduce a call + // that appears in expression context + // to a function whose body is of the form { return expr } + + // TODO(adonovan): split this huge function into a sequence of + // function calls with an error sentinel that means "try the + // next strategy", and make sure each strategy writes to the + // log the reason it didn't match. + + // Special case: eliminate a call to a function whose body is empty. + // (=> callee has no results and caller is a statement.) + // + // func f(params) {} + // f(args) + // => _, _ = args + // + if len(calleeDecl.Body.List) == 0 { + logf("strategy: reduce call to empty body") + + // Evaluate the arguments for effects and delete the call entirely. + // Note(golang/go#71486): stmt can be nil if the call is in a go or defer + // statement. + // TODO: discard go or defer statements as well. 
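+	// For example (hypothetical):
+	//
+	//	func f(x, y int) {}
+	//	f(g(), h())
+	//
+	// becomes
+	//
+	//	_, _ = g(), h()
+	//
+	// so that the arguments are still evaluated for their effects.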
+ if stmt := callStmt(caller.path, false); stmt != nil { + res.old = stmt + if nargs := len(remainingArgs); nargs > 0 { + // Emit "_, _ = args" to discard results. + + // TODO(adonovan): if args is the []T{a1, ..., an} + // literal synthesized during variadic simplification, + // consider unwrapping it to its (pure) elements. + // Perhaps there's no harm doing this for any slice literal. + + // Make correction for spread calls + // f(g()) or recv.f(g()) where g() is a tuple. + if last := last(args); last != nil && last.spread { + nspread := last.typ.(*types.Tuple).Len() + if len(args) > 1 { // [recv, g()] + // A single AssignStmt cannot discard both, so use a 2-spec var decl. + res.new = &ast.GenDecl{ + Tok: token.VAR, + Specs: []ast.Spec{ + &ast.ValueSpec{ + Names: []*ast.Ident{makeIdent("_")}, + Values: []ast.Expr{args[0].expr}, + }, + &ast.ValueSpec{ + Names: blanks[*ast.Ident](nspread), + Values: []ast.Expr{args[1].expr}, + }, + }, + } + return res, nil + } + + // Sole argument is spread call. + nargs = nspread + } + + res.new = &ast.AssignStmt{ + Lhs: blanks[ast.Expr](nargs), + Tok: token.ASSIGN, + Rhs: remainingArgs, + } + + } else { + // No remaining arguments: delete call statement entirely + res.new = &ast.EmptyStmt{} + } + return res, nil + } + } + + // If all parameters have been substituted and no result + // variable is referenced, we don't need a binding decl. + // This may enable better reduction strategies. + allResultsUnreferenced := forall(callee.Results, func(i int, r *paramInfo) bool { return len(r.Refs) == 0 }) + needBindingDecl := !allResultsUnreferenced || + exists(params, func(i int, p *parameter) bool { return p != nil }) + + // The two strategies below overlap for a tail call of {return exprs}: + // The expr-context reduction is nice because it keeps the + // caller's return stmt and merely switches its operand, + // without introducing a new block, but it doesn't work with + // implicit return conversions. + // + // TODO(adonovan): unify these cases more cleanly, allowing return- + // operand replacement and implicit conversions, by adding + // conversions around each return operand (if not a spread return). + + // Special case: call to { return exprs }. + // + // Reduces to: + // { var (bindings); _, _ = exprs } + // or _, _ = exprs + // or expr + // + // If: + // - the body is just "return expr" with trivial implicit conversions, + // or the caller's return type matches the callee's, + // - all parameters and result vars can be eliminated + // or replaced by a binding decl, + // then the call expression can be replaced by the + // callee's body expression, suitably substituted. 
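+	// For example (hypothetical):
+	//
+	//	func add(x, y int) int { return x + y }
+	//	sum := add(a, b)
+	//
+	// reduces to
+	//
+	//	sum := a + b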
+ if len(calleeDecl.Body.List) == 1 && + is[*ast.ReturnStmt](calleeDecl.Body.List[0]) && + len(calleeDecl.Body.List[0].(*ast.ReturnStmt).Results) > 0 { // not a bare return + results := calleeDecl.Body.List[0].(*ast.ReturnStmt).Results + + parent, grandparent := callContext(caller.path) + + // statement context + if stmt, ok := parent.(*ast.ExprStmt); ok && + (!needBindingDecl || bindingDecl != nil) { + logf("strategy: reduce stmt-context call to { return exprs }") + clearPositions(calleeDecl.Body) + + if callee.ValidForCallStmt { + logf("callee body is valid as statement") + // Inv: len(results) == 1 + if !needBindingDecl { + // Reduces to: expr + res.old = caller.Call + res.new = results[0] + } else { + // Reduces to: { var (bindings); expr } + res.bindingDecl = true + res.old = stmt + res.new = &ast.BlockStmt{ + List: []ast.Stmt{ + bindingDecl.stmt, + &ast.ExprStmt{X: results[0]}, + }, + } + } + } else { + logf("callee body is not valid as statement") + // The call is a standalone statement, but the + // callee body is not suitable as a standalone statement + // (f() or <-ch), explicitly discard the results: + // Reduces to: _, _ = exprs + discard := &ast.AssignStmt{ + Lhs: blanks[ast.Expr](callee.NumResults), + Tok: token.ASSIGN, + Rhs: results, + } + res.old = stmt + if !needBindingDecl { + // Reduces to: _, _ = exprs + res.new = discard + } else { + // Reduces to: { var (bindings); _, _ = exprs } + res.bindingDecl = true + res.new = &ast.BlockStmt{ + List: []ast.Stmt{ + bindingDecl.stmt, + discard, + }, + } + } + } + return res, nil + } + + // Assignment context. + // + // If there is no binding decl, or if the binding decl declares no names, + // an assignment a, b := f() can be reduced to a, b := x, y. + if stmt, ok := parent.(*ast.AssignStmt); ok && + is[*ast.BlockStmt](grandparent) && + (!needBindingDecl || (bindingDecl != nil && len(bindingDecl.names) == 0)) { + + // Reduces to: { var (bindings); lhs... := rhs... } + if newStmts, ok := st.assignStmts(stmt, results, istate.importName); ok { + logf("strategy: reduce assign-context call to { return exprs }") + + clearPositions(calleeDecl.Body) + + block := &ast.BlockStmt{ + List: newStmts, + } + if needBindingDecl { + res.bindingDecl = true + block.List = prepend(bindingDecl.stmt, block.List...) + } + + // assignStmts does not introduce new bindings, and replacing an + // assignment only works if the replacement occurs in the same scope. + // Therefore, we must ensure that braces are elided. + res.elideBraces = true + res.old = stmt + res.new = block + return res, nil + } + } + + // expression context + if !needBindingDecl { + clearPositions(calleeDecl.Body) + + anyNonTrivialReturns := hasNonTrivialReturn(callee.Returns) + + if callee.NumResults == 1 { + logf("strategy: reduce expr-context call to { return expr }") + // (includes some simple tail-calls) + + // Make implicit return conversion explicit. + if anyNonTrivialReturns { + results[0] = convert(calleeDecl.Type.Results.List[0].Type, results[0]) + } + + res.old = caller.Call + res.new = results[0] + return res, nil + + } else if !anyNonTrivialReturns { + logf("strategy: reduce spread-context call to { return expr }") + // There is no general way to reify conversions in a spread + // return, hence the requirement above. + // + // TODO(adonovan): allow this reduction when no + // conversion is required by the context. + + // The call returns multiple results but is + // not a standalone call statement. 
It must + // be the RHS of a spread assignment: + // var x, y = f() + // x, y := f() + // x, y = f() + // or the sole argument to a spread call: + // printf(f()) + // or spread return statement: + // return f() + res.old = parent + switch context := parent.(type) { + case *ast.AssignStmt: + // Inv: the call must be in Rhs[0], not Lhs. + assign := shallowCopy(context) + assign.Rhs = results + res.new = assign + case *ast.ValueSpec: + // Inv: the call must be in Values[0], not Names. + spec := shallowCopy(context) + spec.Values = results + res.new = spec + case *ast.CallExpr: + // Inv: the call must be in Args[0], not Fun. + call := shallowCopy(context) + call.Args = results + res.new = call + case *ast.ReturnStmt: + // Inv: the call must be Results[0]. + ret := shallowCopy(context) + ret.Results = results + res.new = ret + default: + return nil, fmt.Errorf("internal error: unexpected context %T for spread call", context) + } + return res, nil + } + } + } + + // Special case: tail-call. + // + // Inlining: + // return f(args) + // where: + // func f(params) (results) { body } + // reduces to: + // { var (bindings); body } + // { body } + // so long as: + // - all parameters can be eliminated or replaced by a binding decl, + // - call is a tail-call; + // - all returns in body have trivial result conversions, + // or the caller's return type matches the callee's, + // - there is no label conflict; + // - no result variable is referenced by name, + // or implicitly by a bare return. + // + // The body may use defer, arbitrary control flow, and + // multiple returns. + // + // TODO(adonovan): add a strategy for a 'void tail + // call', i.e. a call statement prior to an (explicit + // or implicit) return. + parent, _ := callContext(caller.path) + if ret, ok := parent.(*ast.ReturnStmt); ok && + len(ret.Results) == 1 && + tailCallSafeReturn(caller, calleeSymbol, callee) && + !callee.HasBareReturn && + (!needBindingDecl || bindingDecl != nil) && + !hasLabelConflict(caller.path, callee.Labels) && + allResultsUnreferenced { + logf("strategy: reduce tail-call") + body := calleeDecl.Body + clearPositions(body) + if needBindingDecl { + res.bindingDecl = true + body.List = prepend(bindingDecl.stmt, body.List...) + } + res.old = ret + res.new = body + return res, nil + } + + // Special case: call to void function + // + // Inlining: + // f(args) + // where: + // func f(params) { stmts } + // reduces to: + // { var (bindings); stmts } + // { stmts } + // so long as: + // - callee is a void function (no returns) + // - callee does not use defer + // - there is no label conflict between caller and callee + // - all parameters and result vars can be eliminated + // or replaced by a binding decl, + // - caller ExprStmt is in unrestricted statement context. + if stmt := callStmt(caller.path, true); stmt != nil && + (!needBindingDecl || bindingDecl != nil) && + !callee.HasDefer && + !hasLabelConflict(caller.path, callee.Labels) && + len(callee.Returns) == 0 { + logf("strategy: reduce stmt-context call to { stmts }") + body := calleeDecl.Body + var repl ast.Stmt = body + clearPositions(repl) + if needBindingDecl { + body.List = prepend(bindingDecl.stmt, body.List...) 
+ } + res.old = stmt + res.new = repl + return res, nil + } + + // TODO(adonovan): parameterless call to { stmts; return expr } + // from one of these contexts: + // x, y = f() + // x, y := f() + // var x, y = f() + // => + // var (x T1, y T2); { stmts; x, y = expr } + // + // Because the params are no longer declared simultaneously + // we need to check that (for example) x ∉ freevars(T2), + // in addition to the usual checks for arg/result conversions, + // complex control, etc. + // Also test cases where expr is an n-ary call (spread returns). + + // Literalization isn't quite infallible. + // Consider a spread call to a method in which + // no parameters are eliminated, e.g. + // new(T).f(g()) + // where + // func (recv *T) f(x, y int) { body } + // func g() (int, int) + // This would be literalized to: + // func (recv *T, x, y int) { body }(new(T), g()), + // which is not a valid argument list because g() must appear alone. + // Reject this case for now. + if len(args) == 2 && args[0] != nil && args[1] != nil && is[*types.Tuple](args[1].typ) { + return nil, fmt.Errorf("can't yet inline spread call to method") + } + + // Infallible general case: literalization. + // + // func(params) { body }(args) + // + logf("strategy: literalization") + funcLit := &ast.FuncLit{ + Type: calleeDecl.Type, + Body: calleeDecl.Body, + } + // clear positions before prepending the binding decl below, since the + // binding decl contains syntax from the caller and we must not mutate the + // caller. (This was a prior bug.) + clearPositions(funcLit) + + // Literalization can still make use of a binding + // decl as it gives a more natural reading order: + // + // func() { var params = args; body }() + // + // TODO(adonovan): relax the allResultsUnreferenced requirement + // by adding a parameter-only (no named results) binding decl. + if bindingDecl != nil && allResultsUnreferenced { + funcLit.Type.Params.List = nil + remainingArgs = nil + res.bindingDecl = true + funcLit.Body.List = prepend(bindingDecl.stmt, funcLit.Body.List...) + } + + // Emit a new call to a function literal in place of + // the callee name, with appropriate replacements. + newCall := &ast.CallExpr{ + Fun: funcLit, + Ellipsis: token.NoPos, // f(slice...) is always simplified + Args: remainingArgs, + } + res.old = caller.Call + res.new = newCall + return res, nil +} + +// renameFreeObjs computes the renaming of the callee's free identifiers. +// It returns a slice of names (identifiers or selector expressions) corresponding +// to the callee's free objects (gobCallee.FreeObjs). +func (st *state) renameFreeObjs(istate *importState) ([]ast.Expr, error) { + caller, callee := st.caller, &st.callee.impl + objRenames := make([]ast.Expr, len(callee.FreeObjs)) // nil => no change + for i, obj := range callee.FreeObjs { + // obj is a free object of the callee. + // + // Possible cases are: + // - builtin function, type, or value (e.g. nil, zero) + // => check not shadowed in caller. + // - package-level var/func/const/types + // => same package: check not shadowed in caller. + // => otherwise: import other package, form a qualified identifier. + // (Unexported cross-package references were rejected already.) + // - type parameter + // => not yet supported + // - pkgname + // => import other package and use its local name. + // + // There can be no free references to labels, fields, or methods. 
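As an aside, the shadowing check described here can be sketched with the public go/types API. The toy below is self-contained and hypothetical (package `p`, function `f`, and the shadowed built-in `len` are all invented); it mirrors the caller.lookup logic: if resolving a name at the call position yields an object with a valid declaration position, a user symbol shadows the predeclared one and inlining must be rejected.

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p

func f() {
	len := 3 // shadows the built-in
	_ = len
}`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{file}, nil)
	if err != nil {
		panic(err)
	}
	// Resolve "len" from a position inside f's body. A valid Pos means
	// a user declaration shadows the built-in there, so a callee that
	// relies on the built-in cannot be inlined at that point verbatim.
	body := file.Decls[0].(*ast.FuncDecl).Body
	inner := pkg.Scope().Innermost(body.Rbrace - 1)
	if _, obj := inner.LookupParent("len", body.Rbrace); obj != nil && obj.Pos().IsValid() {
		fmt.Printf("built-in len is shadowed by %v\n", obj)
	}
}
```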
+ + // Note that we must consider potential shadowing both + // at the caller side (caller.lookup) and, when + // choosing new PkgNames, within the callee (obj.shadow). + + var newName ast.Expr + if obj.Kind == "pkgname" { + // Use locally appropriate import, creating as needed. + n := istate.localName(obj.PkgPath, obj.PkgName, obj.Shadow) + newName = makeIdent(n) // imported package + } else if !obj.ValidPos { + // Built-in function, type, or value (e.g. nil, zero): + // check not shadowed at caller. + found := caller.lookup(obj.Name) // always finds something + if found.Pos().IsValid() { + return nil, fmt.Errorf("cannot inline, because the callee refers to built-in %q, which in the caller is shadowed by a %s (declared at line %d)", + obj.Name, objectKind(found), + caller.Fset.PositionFor(found.Pos(), false).Line) + } + + } else { + // Must be reference to package-level var/func/const/type, + // since type parameters are not yet supported. + qualify := false + if obj.PkgPath == callee.PkgPath { + // reference within callee package + if caller.Types.Path() == callee.PkgPath { + // Caller and callee are in same package. + // Check caller has not shadowed the decl. + // + // This may fail if the callee is "fake", such as for signature + // refactoring where the callee is modified to be a trivial wrapper + // around the refactored signature. + found := caller.lookup(obj.Name) + if found != nil && !isPkgLevel(found) { + return nil, fmt.Errorf("cannot inline, because the callee refers to %s %q, which in the caller is shadowed by a %s (declared at line %d)", + obj.Kind, obj.Name, + objectKind(found), + caller.Fset.PositionFor(found.Pos(), false).Line) + } + } else { + // Cross-package reference. + qualify = true + } + } else { + // Reference to a package-level declaration + // in another package, without a qualified identifier: + // it must be a dot import. + qualify = true + } + + // Form a qualified identifier, pkg.Name. + if qualify { + pkgName := istate.localName(obj.PkgPath, obj.PkgName, obj.Shadow) + newName = &ast.SelectorExpr{ + X: makeIdent(pkgName), + Sel: makeIdent(obj.Name), + } + } + } + objRenames[i] = newName + } + return objRenames, nil +} + +type argument struct { + expr ast.Expr + typ types.Type // may be tuple for sole non-receiver arg in spread call + constant constant.Value // value of argument if constant + spread bool // final arg is call() assigned to multiple params + pure bool // expr is pure (doesn't read variables) + effects bool // expr has effects (updates variables) + duplicable bool // expr may be duplicated + freevars map[string]bool // free names of expr + variadic bool // is explicit []T{...} for eliminated variadic + desugaredRecv bool // is *recv or &recv, where operator was elided +} + +// typeArguments returns the type arguments of the call. +// It only collects the arguments that are explicitly provided; it does +// not attempt type inference. +func (st *state) typeArguments(call *ast.CallExpr) []*argument { + var exprs []ast.Expr + switch d := ast.Unparen(call.Fun).(type) { + case *ast.IndexExpr: + exprs = []ast.Expr{d.Index} + case *ast.IndexListExpr: + exprs = d.Indices + default: + // No type arguments + return nil + } + var args []*argument + for _, e := range exprs { + arg := &argument{expr: e, freevars: freeVars(st.caller.Info, e)} + // Wrap the instantiating type in parens when it's not an + // ident or qualified ident to prevent "if x == struct{}" + // parsing ambiguity, or "T(x)" where T = "*int" or "func()" + // from misparsing. 
+ // TODO(adonovan): this fails in cases where parens are disallowed, such as + // in the composite literal expression T{k: v}. + if _, ok := arg.expr.(*ast.Ident); !ok { + arg.expr = &ast.ParenExpr{X: arg.expr} + } + args = append(args, arg) + } + return args +} + +// arguments returns the effective arguments of the call. +// +// If the receiver argument and parameter have +// different pointerness, make the "&" or "*" explicit. +// +// Also, if x.f() is shorthand for promoted method x.y.f(), +// make the .y explicit in T.f(x.y, ...). +// +// Beware that: +// +// - a method can only be called through a selection, but only +// the first of these two forms needs special treatment: +// +// expr.f(args) -> ([&*]expr, args) MethodVal +// T.f(recv, args) -> ( expr, args) MethodExpr +// +// - the presence of a value in receiver-position in the call +// is a property of the caller, not the callee. A method +// (calleeDecl.Recv != nil) may be called like an ordinary +// function. +// +// - the types.Signatures seen by the caller (from +// StaticCallee) and by the callee (from decl type) +// differ in this case. +// +// In a spread call f(g()), the sole ordinary argument g(), +// always last in args, has a tuple type. +// +// We compute type-based predicates like pure, duplicable, +// freevars, etc, now, before we start modifying syntax. +func (st *state) arguments(caller *Caller, calleeDecl *ast.FuncDecl, assign1 func(*types.Var) bool) ([]*argument, error) { + var args []*argument + + callArgs := caller.Call.Args + if calleeDecl.Recv != nil { + if len(st.callee.impl.TypeParams) > 0 { + return nil, fmt.Errorf("cannot inline: generic methods not yet supported") + } + sel := ast.Unparen(caller.Call.Fun).(*ast.SelectorExpr) + seln := caller.Info.Selections[sel] + var recvArg ast.Expr + switch seln.Kind() { + case types.MethodVal: // recv.f(callArgs) + recvArg = sel.X + case types.MethodExpr: // T.f(recv, callArgs) + recvArg = callArgs[0] + callArgs = callArgs[1:] + } + if recvArg != nil { + // Compute all the type-based predicates now, + // before we start meddling with the syntax; + // the meddling will update them. + arg := &argument{ + expr: recvArg, + typ: caller.Info.TypeOf(recvArg), + constant: caller.Info.Types[recvArg].Value, + pure: pure(caller.Info, assign1, recvArg), + effects: st.effects(caller.Info, recvArg), + duplicable: duplicable(caller.Info, recvArg), + freevars: freeVars(caller.Info, recvArg), + } + recvArg = nil // prevent accidental use + + // Move receiver argument recv.f(args) to argument list f(&recv, args). + args = append(args, arg) + + // Make field selections explicit (recv.f -> recv.y.f), + // updating arg.{expr,typ}. + indices := seln.Index() + for _, index := range indices[:len(indices)-1] { + fld := typeparams.CoreType(typeparams.Deref(arg.typ)).(*types.Struct).Field(index) + if fld.Pkg() != caller.Types && !fld.Exported() { + return nil, fmt.Errorf("in %s, implicit reference to unexported field .%s cannot be made explicit", + debugFormatNode(caller.Fset, caller.Call.Fun), + fld.Name()) + } + if isPointer(arg.typ) { + arg.pure = false // implicit *ptr operation => impure + } + arg.expr = &ast.SelectorExpr{ + X: arg.expr, + Sel: makeIdent(fld.Name()), + } + arg.typ = fld.Type() + arg.duplicable = false + } + + // Make * or & explicit. 
+ argIsPtr := isPointer(arg.typ) + paramIsPtr := isPointer(seln.Obj().Type().Underlying().(*types.Signature).Recv().Type()) + if !argIsPtr && paramIsPtr { + // &recv + arg.expr = &ast.UnaryExpr{Op: token.AND, X: arg.expr} + arg.typ = types.NewPointer(arg.typ) + arg.desugaredRecv = true + } else if argIsPtr && !paramIsPtr { + // *recv + arg.expr = &ast.StarExpr{X: arg.expr} + arg.typ = typeparams.Deref(arg.typ) + arg.duplicable = false + arg.pure = false + arg.desugaredRecv = true + } + } + } + for _, expr := range callArgs { + tv := caller.Info.Types[expr] + args = append(args, &argument{ + expr: expr, + typ: tv.Type, + constant: tv.Value, + spread: is[*types.Tuple](tv.Type), // => last + pure: pure(caller.Info, assign1, expr), + effects: st.effects(caller.Info, expr), + duplicable: duplicable(caller.Info, expr), + freevars: freeVars(caller.Info, expr), + }) + } + + // Re-typecheck each constant argument expression in a neutral context. + // + // In a call such as func(int16){}(1), the type checker infers + // the type "int16", not "untyped int", for the argument 1, + // because it has incorporated information from the left-hand + // side of the assignment implicit in parameter passing, but + // of course in a different context, the expression 1 may have + // a different type. + // + // So, we must use CheckExpr to recompute the type of the + // argument in a neutral context to find its inherent type. + // (This is arguably a bug in go/types, but I'm pretty certain + // I requested it be this way long ago... -adonovan) + // + // This is only needed for constants. Other implicit + // assignment conversions, such as unnamed-to-named struct or + // chan to <-chan, do not result in the type-checker imposing + // the LHS type on the RHS value. + for _, arg := range args { + if arg.constant == nil { + continue + } + info := &types.Info{Types: make(map[ast.Expr]types.TypeAndValue)} + if err := types.CheckExpr(caller.Fset, caller.Types, caller.Call.Pos(), arg.expr, info); err != nil { + return nil, err + } + arg.typ = info.TypeOf(arg.expr) + } + + return args, nil +} + +type parameter struct { + obj *types.Var // parameter var from caller's signature + fieldType ast.Expr // syntax of type, from calleeDecl.Type.{Recv,Params} + info *paramInfo // information from AnalyzeCallee + variadic bool // (final) parameter is unsimplified ...T +} + +// A replacer replaces an identifier at the given offset in the callee. +// The replacement tree must not belong to the caller; use cloneNode as needed. +// If unpackVariadic is set, the replacement is a composite resulting from +// variadic elimination, and may be unpacked into variadic calls. +type replacer = func(offset int, repl ast.Expr, unpackVariadic bool) + +// substituteTypeParams replaces type parameters in the callee with the corresponding type arguments +// from the call. +func substituteTypeParams(logf logger, typeParams []*paramInfo, typeArgs []*argument, params []*parameter, replace replacer) error { + assert(len(typeParams) == len(typeArgs), "mismatched number of type params/args") + for i, paramInfo := range typeParams { + arg := typeArgs[i] + // Perform a simplified, conservative shadow analysis: fail if there is any shadowing. 
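A toy version of the field-type rewrite that this loop performs after the shadow check. The type parameter `T` and the argument `int` are hypothetical, and the sketch renames identifiers in place where the real code splices a clone of the type argument's syntax:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/format"
	"go/parser"
	"go/token"
	"strings"
)

func main() {
	fset := token.NewFileSet()
	// Syntax of a parameter's type that mentions type parameter T.
	expr, err := parser.ParseExprFrom(fset, "t.go", "map[T][]*T", 0)
	if err != nil {
		panic(err)
	}
	// Rewrite every identifier named T to the type argument int.
	ast.Inspect(expr, func(n ast.Node) bool {
		if id, ok := n.(*ast.Ident); ok && id.Name == "T" {
			id.Name = "int"
		}
		return true
	})
	var buf strings.Builder
	if err := format.Node(&buf, fset, expr); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // map[int][]*int
}
```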
+		for free := range arg.freevars {
+			if paramInfo.Shadow[free] != 0 {
+				return fmt.Errorf("cannot inline: type argument #%d (type parameter %s) is shadowed", i, paramInfo.Name)
+			}
+		}
+		logf("replacing type param %s with %s", paramInfo.Name, debugFormatNode(token.NewFileSet(), arg.expr))
+		for _, ref := range paramInfo.Refs {
+			replace(ref.Offset, internalastutil.CloneNode(arg.expr), false)
+		}
+		// Also replace parameter field types.
+		// TODO(jba): find a way to do this that is not so slow and clumsy.
+		// Ideally, we'd walk each p.fieldType once, replacing all type params together.
+		for _, p := range params {
+			if id, ok := p.fieldType.(*ast.Ident); ok && id.Name == paramInfo.Name {
+				p.fieldType = arg.expr
+			} else {
+				for _, id := range identsNamed(p.fieldType, paramInfo.Name) {
+					replaceNode(p.fieldType, id, arg.expr)
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func identsNamed(n ast.Node, name string) []*ast.Ident {
+	var ids []*ast.Ident
+	ast.Inspect(n, func(n ast.Node) bool {
+		if id, ok := n.(*ast.Ident); ok && id.Name == name {
+			ids = append(ids, id)
+		}
+		return true
+	})
+	return ids
+}
+
+// substitute implements parameter elimination by substitution.
+//
+// It considers each parameter and its corresponding argument in turn
+// and evaluates these conditions:
+//
+//   - the parameter is neither address-taken nor assigned;
+//   - the argument is pure;
+//   - if the parameter refcount is zero, the argument must
+//     not contain the last use of a local var;
+//   - if the parameter refcount is > 1, the argument must be duplicable;
+//   - the argument (or types.Default(argument) if it's untyped) has
+//     the same type as the parameter.
+//
+// If all conditions are met then the parameter can be substituted and
+// each reference to it replaced by the argument. In that case, the
+// replaceCalleeID function is called for each reference to the
+// parameter, and is provided with its relative offset and replacement
+// expression (argument), and the corresponding elements of params and
+// args are replaced by nil.
+func substitute(logf logger, caller *Caller, params []*parameter, args []*argument, effects []int, falcon falconResult, replace replacer) {
+	// Inv:
+	//  in calls to variadic, len(args) >= len(params)-1
+	//  in spread calls to non-variadic, len(args) < len(params)
+	//  in spread calls to variadic, len(args) <= len(params)
+	// (In spread calls len(args) = 1, or 2 if call has receiver.)
+	// Non-spread variadics have been simplified away already,
+	// so the args[i] lookup is safe if we stop after the spread arg.
+	assert(len(args) <= len(params), "too many arguments")
+
+	// Collect candidates for substitution.
+	//
+	// An argument is a candidate if it is not otherwise rejected, and any free
+	// variables it has are either unshadowed or shadowed only by other
+	// parameters.
+	//
+	// Therefore, substitution candidates are represented by a graph, where edges
+	// lead from each argument to the other arguments that, if substituted, would
+	// allow the argument to be substituted. We collect these edges in the
+	// [substGraph]. Any node that is known not to be substitutable is elided
+	// from the graph. Arguments in this graph with no edges are substitutable
+	// independently of other nodes, though they may be removed due to falcon or
+	// effects analysis.
+	sg := make(substGraph)
+next:
+	for i, param := range params {
+		arg := args[i]
+
+		// Check argument against parameter.
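Before the per-argument checks, a hypothetical caller/callee pair grounding the conditions from the doc comment above (every identifier here is invented):

```go
package p

var v int

func g() int { v++; return v } // impure: writes v

func double(x int) int { return x + x } // refcount of x is 2

func caller() {
	a := 1
	_ = double(a)   // a is pure and duplicable: reduces to a + a
	_ = double(g()) // g() has effects: bound in a binding decl, not duplicated
}
```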
+ // + // Beware: don't use types.Info on arg since + // the syntax may be synthetic (not created by parser) + // and thus lacking positions and types; + // do it earlier (see pure/duplicable/freevars). + + if arg.spread { + // spread => last argument, but not always last parameter + logf("keeping param %q and following ones: argument %s is spread", + param.info.Name, debugFormatNode(caller.Fset, arg.expr)) + return // give up + } + assert(!param.variadic, "unsimplified variadic parameter") + if param.info.Escapes { + logf("keeping param %q: escapes from callee", param.info.Name) + continue + } + if param.info.Assigned { + logf("keeping param %q: assigned by callee", param.info.Name) + continue // callee needs the parameter variable + } + if len(param.info.Refs) > 1 && !arg.duplicable { + logf("keeping param %q: argument is not duplicable", param.info.Name) + continue // incorrect or poor style to duplicate an expression + } + if len(param.info.Refs) == 0 { + if arg.effects { + logf("keeping param %q: though unreferenced, it has effects", param.info.Name) + continue + } + + // If the caller is within a function body, + // eliminating an unreferenced parameter might + // remove the last reference to a caller local var. + if caller.enclosingFunc != nil { + for free := range arg.freevars { + // TODO(rfindley): we can get this 100% right by looking for + // references among other arguments which have non-zero references + // within the callee. + if v, ok := caller.lookup(free).(*types.Var); ok && within(v.Pos(), caller.enclosingFunc.Body) && !isUsedOutsideCall(caller, v) { + + // Check to see if the substituted var is used within other args + // whose corresponding params ARE used in the callee + usedElsewhere := func() bool { + for i, param := range params { + if i < len(args) && len(param.info.Refs) > 0 { // excludes original param + for name := range args[i].freevars { + if caller.lookup(name) == v { + return true + } + } + } + } + return false + } + if !usedElsewhere() { + logf("keeping param %q: arg contains perhaps the last reference to caller local %v @ %v", + param.info.Name, v, caller.Fset.PositionFor(v.Pos(), false)) + continue next + } + } + } + } + } + + // Arg is a potential substitution candidate: analyze its shadowing. + // + // Consider inlining a call f(z, 1) to + // + // func f(x, y int) int { z := y; return x + y + z } + // + // we can't replace x in the body by z (or any + // expression that has z as a free identifier) because there's an + // intervening declaration of z that would shadow the caller's one. + // + // However, we *could* replace x in the body by y, as long as the y + // parameter is also removed by substitution. + + sg[arg] = nil // Absent shadowing, the arg is substitutable. + for free := range arg.freevars { + switch s := param.info.Shadow[free]; { + case s < 0: + // Shadowed by a non-parameter symbol, so arg is not substitutable. + delete(sg, arg) + case s > 0: + // Shadowed by a parameter; arg may be substitutable, if only shadowed + // by other substitutable parameters. + if s > len(args) { + // Defensive: this should not happen in the current factoring, since + // spread arguments are already handled. + delete(sg, arg) + } + if edges, ok := sg[arg]; ok { + sg[arg] = append(edges, args[s-1]) + } + } + } + } + + // Process the initial state of the substitution graph. + sg.prune() + + // Now we check various conditions on the substituted argument set as a + // whole. 
These conditions reject substitution candidates, but since their + // analysis depends on the full set of candidates, we do not process side + // effects of their candidate rejection until after the analysis completes, + // in a call to prune. After pruning, we must re-run the analysis to check + // for additional rejections. + // + // Here's an example of that in practice: + // + // var a [3]int + // + // func falcon(x, y, z int) { + // _ = x + a[y+z] + // } + // + // func _() { + // var y int + // const x, z = 1, 2 + // falcon(y, x, z) + // } + // + // In this example, arguments 0 and 1 are shadowed by each other's + // corresponding parameter, and so each can be substituted only if they are + // both substituted. But the fallible constant analysis finds a violated + // constraint: x + z = 3, and so the constant array index would cause a + // compile-time error if argument 1 (x) were substituted. Therefore, + // following the falcon analysis, we must also prune argument 0. + // + // As far as I (rfindley) can tell, the falcon analysis should always succeed + // after the first pass, as it's not possible for additional bindings to + // cause new constraint failures. Nevertheless, we re-run it to be sure. + // + // However, the same cannot be said of the effects analysis, as demonstrated + // by this example: + // + // func effects(w, x, y, z int) { + // _ = x + w + y + z + // } + + // func _() { + // v := 0 + // w := func() int { v++; return 0 } + // x := func() int { v++; return 0 } + // y := func() int { v++; return 0 } + // effects(x(), w(), y(), x()) //@ inline(re"effects", effects) + // } + // + // In this example, arguments 0, 1, and 3 are related by the substitution + // graph. The first effects analysis implies that arguments 0 and 1 must be + // bound, and therefore argument 3 must be bound. But then a subsequent + // effects analysis forces argument 2 to also be bound. + + // Reject constant arguments as substitution candidates if they cause + // violation of falcon constraints. + // + // Keep redoing the analysis until we no longer reject additional arguments, + // as the set of substituted parameters affects the falcon package. + for checkFalconConstraints(logf, params, args, falcon, sg) { + sg.prune() + } + + // As a final step, introduce bindings to resolve any + // evaluation order hazards. This must be done last, as + // additional subsequent bindings could introduce new hazards. + // + // As with the falcon analysis, keep redoing the analysis until the no more + // arguments are rejected. + for resolveEffects(logf, args, effects, sg) { + sg.prune() + } + + // The remaining candidates are safe to substitute. + for i, param := range params { + if arg := args[i]; sg.has(arg) { + + // It is safe to substitute param and replace it with arg. + // The formatter introduces parens as needed for precedence. + // + // Because arg.expr belongs to the caller, + // we clone it before splicing it into the callee tree. + logf("replacing parameter %q by argument %q", + param.info.Name, debugFormatNode(caller.Fset, arg.expr)) + for _, ref := range param.info.Refs { + // Apply any transformations necessary for this reference. + argExpr := arg.expr + + // If the reference itself is being selected, and we applied desugaring + // (an explicit &x or *x), we can undo that desugaring here as it is + // not necessary for a selector. We don't need to check addressability + // here because if we desugared, the receiver must have been + // addressable. 
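A standalone sketch of this undo step, with invented identifiers `t` and `f`. The desugared receiver `&t` is unwrapped again because selection through an addressable value does not need the explicit operator:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/format"
	"go/token"
	"strings"
)

func main() {
	// The receiver argument was desugared to &t; since the reference
	// is itself selected, (&t).f can be simplified back to t.f.
	arg := ast.Expr(&ast.UnaryExpr{Op: token.AND, X: &ast.Ident{Name: "t"}})
	if u, ok := arg.(*ast.UnaryExpr); ok && u.Op == token.AND {
		arg = u.X // drop the explicit &
	}
	sel := &ast.SelectorExpr{X: arg, Sel: &ast.Ident{Name: "f"}}
	var buf strings.Builder
	if err := format.Node(&buf, token.NewFileSet(), sel); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // t.f
}
```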
+ if ref.IsSelectionOperand && arg.desugaredRecv { + switch e := argExpr.(type) { + case *ast.UnaryExpr: + argExpr = e.X + case *ast.StarExpr: + argExpr = e.X + } + } + + // If the reference requires exact type agreement between parameter and + // argument, wrap the argument in an explicit conversion if + // substitution might materially change its type. (We already did the + // necessary shadowing check on the parameter type syntax.) + // + // The types must agree in any of these cases: + // - the argument affects type inference; + // - the reference's concrete type is assigned to an interface type; + // - the reference is not an assignment, nor a trivial conversion of an untyped constant. + // + // In all other cases, no explicit conversion is necessary as either + // the type does not matter, or must have already agreed for well-typed + // code. + // + // This is only needed for substituted arguments. All other arguments + // are given explicit types in either a binding decl or when using the + // literalization strategy. + // + // If the types are identical, we can eliminate + // redundant type conversions such as this: + // + // Callee: + // func f(i int32) { fmt.Println(i) } + // Caller: + // func g() { f(int32(1)) } + // Inlined as: + // func g() { fmt.Println(int32(int32(1))) + // + // Recall that non-trivial does not imply non-identical for constant + // conversions; however, at this point state.arguments has already + // re-typechecked the constant and set arg.type to its (possibly + // "untyped") inherent type, so the conversion from untyped 1 to int32 + // is non-trivial even though both arg and param have identical types + // (int32). + needType := ref.AffectsInference || + (ref.Assignable && ref.IfaceAssignment && !param.info.IsInterface) || + (!ref.Assignable && !trivialConversion(arg.constant, arg.typ, param.obj.Type())) + + if needType && + !types.Identical(types.Default(arg.typ), param.obj.Type()) { + + // If arg.expr is already an interface call, strip it. + if call, ok := argExpr.(*ast.CallExpr); ok && len(call.Args) == 1 { + if typ, ok := isConversion(caller.Info, call); ok && isNonTypeParamInterface(typ) { + argExpr = call.Args[0] + } + } + + argExpr = convert(param.fieldType, argExpr) + logf("param %q (offset %d): adding explicit %s -> %s conversion around argument", + param.info.Name, ref.Offset, arg.typ, param.obj.Type()) + } + replace(ref.Offset, internalastutil.CloneNode(argExpr).(ast.Expr), arg.variadic) + } + params[i] = nil // substituted + args[i] = nil // substituted + } + } +} + +// isConversion reports whether the given call is a type conversion, returning +// (operand, true) if so. +// +// If the call is not a conversion, it returns (nil, false). +func isConversion(info *types.Info, call *ast.CallExpr) (types.Type, bool) { + if tv, ok := info.Types[call.Fun]; ok && tv.IsType() { + return tv.Type, true + } + return nil, false +} + +// isNonTypeParamInterface reports whether t is a non-type parameter interface +// type. +func isNonTypeParamInterface(t types.Type) bool { + return !typeparams.IsTypeParam(t) && types.IsInterface(t) +} + +// isUsedOutsideCall reports whether v is used outside of caller.Call, within +// the body of caller.enclosingFunc. 
+func isUsedOutsideCall(caller *Caller, v *types.Var) bool {
+	used := false
+	ast.Inspect(caller.enclosingFunc.Body, func(n ast.Node) bool {
+		if n == caller.Call {
+			return false
+		}
+		switch n := n.(type) {
+		case *ast.Ident:
+			if use := caller.Info.Uses[n]; use == v {
+				used = true
+			}
+		case *ast.FuncType:
+			// All params are used.
+			for _, fld := range n.Params.List {
+				for _, n := range fld.Names {
+					if def := caller.Info.Defs[n]; def == v {
+						used = true
+					}
+				}
+			}
+		}
+		return !used // keep going until we find a use
+	})
+	return used
+}
+
+// checkFalconConstraints checks whether constant arguments
+// are safe to substitute (e.g. s[i] -> ""[0] is not safe.)
+//
+// Any failed constraint causes us to reject all constant arguments as
+// substitution candidates (by removing them from the substitution graph).
+//
+// TODO(adonovan): we could obtain a finer result rejecting only the
+// freevars of each failed constraint, and processing constraints in
+// order of increasing arity, but failures are quite rare.
+func checkFalconConstraints(logf logger, params []*parameter, args []*argument, falcon falconResult, sg substGraph) bool {
+	// Create a dummy package, as this is the only
+	// way to create an environment for CheckExpr.
+	pkg := types.NewPackage("falcon", "falcon")
+
+	// Declare types used by constraints.
+	for _, typ := range falcon.Types {
+		logf("falcon env: type %s %s", typ.Name, types.Typ[typ.Kind])
+		pkg.Scope().Insert(types.NewTypeName(token.NoPos, pkg, typ.Name, types.Typ[typ.Kind]))
+	}
+
+	// Declare constants and variables for the parameters.
+	nconst := 0
+	for i, param := range params {
+		name := param.info.Name
+		if name == "" {
+			continue // unreferenced
+		}
+		arg := args[i]
+		if arg.constant != nil && sg.has(arg) && param.info.FalconType != "" {
+			t := pkg.Scope().Lookup(param.info.FalconType).Type()
+			pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, t, arg.constant))
+			logf("falcon env: const %s %s = %v", name, param.info.FalconType, arg.constant)
+			nconst++
+		} else {
+			v := types.NewVar(token.NoPos, pkg, name, arg.typ)
+			typesinternal.SetVarKind(v, typesinternal.PackageVar)
+			pkg.Scope().Insert(v)
+			logf("falcon env: var %s %s", name, arg.typ)
+		}
+	}
+	if nconst == 0 {
+		return false // nothing to do
+	}
+
+	// Parse and evaluate the constraints in the environment.
+	fset := token.NewFileSet()
+	removed := false
+	for _, falcon := range falcon.Constraints {
+		expr, err := parser.ParseExprFrom(fset, "falcon", falcon, 0)
+		if err != nil {
+			panic(fmt.Sprintf("failed to parse falcon constraint %s: %v", falcon, err))
+		}
+		if err := types.CheckExpr(fset, pkg, token.NoPos, expr, nil); err != nil {
+			logf("falcon: constraint %s violated: %v", falcon, err)
+			for j, arg := range args {
+				if arg.constant != nil && sg.has(arg) {
+					logf("keeping param %q due to falcon violation", params[j].info.Name)
+					removed = sg.remove(arg) || removed
+				}
+			}
+			break
+		}
+		logf("falcon: constraint %s satisfied", falcon)
+	}
+	return removed
+}
+
+// resolveEffects marks arguments as non-substitutable to resolve
+// hazards resulting from the callee evaluation order described by the
+// effects list.
+//
+// To do this, each argument is categorized as a read (R), write (W),
+// or pure. A hazard occurs when the order of evaluation of a W
+// changes with respect to any R or W. Pure arguments can be
+// effectively ignored, as they can be safely evaluated in any order.
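As a concrete, hypothetical instance of such a hazard (all names invented):

```go
package p

var v int

func inc() int { v++; return v } // a write (W)

// The callee reads y before x, so its effects list is [1 0].
func f(x, y int) int { return y - x }

// Substituting both arguments of f(inc(), v) would produce v - inc(),
// moving the read of v before the write inside inc() and reversing the
// original left-to-right argument evaluation. resolveEffects therefore
// binds inc(), e.g. { var x int = inc(); ... y - x ... }.
func g() int { return f(inc(), v) }
```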
+// +// The callee effects list contains the index of each parameter in the +// order it is first evaluated during execution of the callee. In +// addition, the two special values R∞ and W∞ indicate the relative +// position of the callee's first non-parameter read and its first +// effects (or other unknown behavior). +// For example, the list [0 2 1 R∞ 3 W∞] for func(a, b, c, d) +// indicates that the callee referenced parameters a, c, and b, +// followed by an arbitrary read, then parameter d, and finally +// unknown behavior. +// +// When an argument is marked as not substitutable, we say that it is +// 'bound', in the sense that its evaluation occurs in a binding decl +// or literalized call. Such bindings always occur in the original +// callee parameter order. +// +// In this context, "resolving hazards" means binding arguments so +// that they are evaluated in a valid, hazard-free order. A trivial +// solution to this problem would be to bind all arguments, but of +// course that's not useful. The goal is to bind as few arguments as +// possible. +// +// The algorithm proceeds by inspecting arguments in reverse parameter +// order (right to left), preserving the invariant that every +// higher-ordered argument is either already substituted or does not +// need to be substituted. At each iteration, if there is an +// evaluation hazard in the callee effects relative to the current +// argument, the argument must be bound. Subsequently, if the argument +// is bound for any reason, each lower-ordered argument must also be +// bound if either the argument or lower-order argument is a +// W---otherwise the binding itself would introduce a hazard. +// +// Thus, after each iteration, there are no hazards relative to the +// current argument. Subsequent iterations cannot introduce hazards +// with that argument because they can result only in additional +// binding of lower-ordered arguments. +func resolveEffects(logf logger, args []*argument, effects []int, sg substGraph) bool { + effectStr := func(effects bool, idx int) string { + i := fmt.Sprint(idx) + if idx == len(args) { + i = "∞" + } + return string("RW"[btoi(effects)]) + i + } + removed := false + for i := len(args) - 1; i >= 0; i-- { + argi := args[i] + if sg.has(argi) && !argi.pure { + // i is not bound: check whether it must be bound due to hazards. + idx := slices.Index(effects, i) + if idx >= 0 { + for _, j := range effects[:idx] { + var ( + ji int // effective param index + jw bool // j is a write + ) + if j == winf || j == rinf { + jw = j == winf + ji = len(args) + } else { + jw = args[j].effects + ji = j + } + if ji > i && (jw || argi.effects) { // out of order evaluation + logf("binding argument %s: preceded by %s", + effectStr(argi.effects, i), effectStr(jw, ji)) + + removed = sg.remove(argi) || removed + break + } + } + } + } + if !sg.has(argi) { + for j := 0; j < i; j++ { + argj := args[j] + if argj.pure { + continue + } + if (argi.effects || argj.effects) && sg.has(argj) { + logf("binding argument %s: %s is bound", + effectStr(argj.effects, j), effectStr(argi.effects, i)) + + removed = sg.remove(argj) || removed + } + } + } + } + return removed +} + +// A substGraph is a directed graph representing arguments that may be +// substituted, provided all of their related arguments (or "dependencies") are +// also substituted. The candidates arguments for substitution are the keys in +// this graph, and the edges represent shadowing of free variables of the key +// by parameters corresponding to the dependency arguments. 
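A minimal re-implementation of the idea, for illustration only: string keys stand in for *argument, and a fixed-point loop stands in for the DFS-based prune shown below.

```go
package main

import "fmt"

// graph is a toy stand-in for substGraph: keys are substitution
// candidates; edges are arguments that must also be substituted.
type graph map[string][]string

// prune removes every key that depends, directly or transitively,
// on an argument that is not itself a candidate.
func (g graph) prune() {
	for {
		removed := false
		for k, deps := range g {
			for _, d := range deps {
				if _, ok := g[d]; !ok {
					delete(g, k)
					removed = true
					break
				}
			}
		}
		if !removed {
			return
		}
	}
}

func main() {
	g := graph{"a": {"b"}, "b": {"c"}} // "c" is not a candidate
	g.prune()
	fmt.Println(len(g)) // 0: "b" depends on "c", and "a" on "b"
}
```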
+// +// Any argument not present as a map key is known not to be substitutable. Some +// arguments may have edges leading to other arguments that are not present in +// the graph. In this case, those arguments also cannot be substituted, because +// they have free variables that are shadowed by parameters that cannot be +// substituted. Calling [substGraph.prune] removes these arguments from the +// graph. +// +// The 'prune' operation is not built into the 'remove' step both because +// analyses (falcon, effects) need local information about each argument +// independent of dependencies, and for the efficiency of pruning once en masse +// after each analysis. +type substGraph map[*argument][]*argument + +// has reports whether arg is a candidate for substitution. +func (g substGraph) has(arg *argument) bool { + _, ok := g[arg] + return ok +} + +// remove marks arg as not substitutable, reporting whether the arg was +// previously substitutable. +// +// remove does not have side effects on other arguments that may be +// unsubstitutable as a result of their dependency being removed. +// Call [substGraph.prune] to propagate these side effects, removing dependent +// arguments. +func (g substGraph) remove(arg *argument) bool { + pre := len(g) + delete(g, arg) + return len(g) < pre +} + +// prune updates the graph to remove any keys that reach other arguments not +// present in the graph. +func (g substGraph) prune() { + // visit visits the forward transitive closure of arg and reports whether any + // missing argument was encountered, removing all nodes on the path to it + // from arg. + // + // The seen map is used for cycle breaking. In the presence of cycles, visit + // may report a false positive for an intermediate argument. For example, + // consider the following graph, where only a and b are candidates for + // substitution (meaning, only a and b are present in the graph). + // + // a ↔ b + // ↓ + // [c] + // + // In this case, starting a visit from a, visit(b, seen) may report 'true', + // because c has not yet been considered. For this reason, we must guarantee + // that visit is called with an empty seen map at least once for each node. + var visit func(*argument, map[*argument]unit) bool + visit = func(arg *argument, seen map[*argument]unit) bool { + deps, ok := g[arg] + if !ok { + return false + } + if _, ok := seen[arg]; !ok { + seen[arg] = unit{} + for _, dep := range deps { + if !visit(dep, seen) { + delete(g, arg) + return false + } + } + } + return true + } + for arg := range g { + // Remove any argument that is, or transitively depends upon, + // an unsubstitutable argument. + // + // Each visitation gets a fresh cycle-breaking set. + visit(arg, make(map[*argument]unit)) + } +} + +// updateCalleeParams updates the calleeDecl syntax to remove +// substituted parameters and move the receiver (if any) to the head +// of the ordinary parameters. +func updateCalleeParams(calleeDecl *ast.FuncDecl, params []*parameter) { + // The logic is fiddly because of the three forms of ast.Field: + // + // func(int), func(x int), func(x, y int) + // + // Also, ensure that all remaining parameters are named + // to avoid a mix of named/unnamed when joining (recv, params...). + // func (T) f(int, bool) -> (_ T, _ int, _ bool) + // (Strictly, we need do this only for methods and only when + // the namednesses of Recv and Params differ; that might be tidier.) + + paramIdx := 0 // index in original parameter list (incl. 
receiver) + var newParams []*ast.Field + filterParams := func(field *ast.Field) { + var names []*ast.Ident + if field.Names == nil { + // Unnamed parameter field (e.g. func f(int) + if params[paramIdx] != nil { + // Give it an explicit name "_" since we will + // make the receiver (if any) a regular parameter + // and one cannot mix named and unnamed parameters. + names = append(names, makeIdent("_")) + } + paramIdx++ + } else { + // Named parameter field e.g. func f(x, y int) + // Remove substituted parameters in place. + // If all were substituted, delete field. + for _, id := range field.Names { + if pinfo := params[paramIdx]; pinfo != nil { + // Rename unreferenced parameters with "_". + // This is crucial for binding decls, since + // unlike parameters, they are subject to + // "unreferenced var" checks. + if len(pinfo.info.Refs) == 0 { + id = makeIdent("_") + } + names = append(names, id) + } + paramIdx++ + } + } + if names != nil { + newParams = append(newParams, &ast.Field{ + Names: names, + Type: field.Type, + }) + } + } + if calleeDecl.Recv != nil { + filterParams(calleeDecl.Recv.List[0]) + calleeDecl.Recv = nil + } + for _, field := range calleeDecl.Type.Params.List { + filterParams(field) + } + calleeDecl.Type.Params.List = newParams +} + +// bindingDeclInfo records information about the binding decl produced by +// createBindingDecl. +type bindingDeclInfo struct { + names map[string]bool // names bound by the binding decl; possibly empty + stmt ast.Stmt // the binding decl itself +} + +// createBindingDecl constructs a "binding decl" that implements +// parameter assignment and declares any named result variables +// referenced by the callee. It returns nil if there were no +// unsubstituted parameters. +// +// It may not always be possible to create the decl (e.g. due to +// shadowing), in which case it also returns nil; but if it succeeds, +// the declaration may be used by reduction strategies to relax the +// requirement that all parameters have been substituted. +// +// For example, a call: +// +// f(a0, a1, a2) +// +// where: +// +// func f(p0, p1 T0, p2 T1) { body } +// +// reduces to: +// +// { +// var ( +// p0, p1 T0 = a0, a1 +// p2 T1 = a2 +// ) +// body +// } +// +// so long as p0, p1 ∉ freevars(T1) or freevars(a2), and so on, +// because each spec is statically resolved in sequence and +// dynamically assigned in sequence. By contrast, all +// parameters are resolved simultaneously and assigned +// simultaneously. +// +// The pX names should already be blank ("_") if the parameter +// is unreferenced; this avoids "unreferenced local var" checks. +// +// Strategies may impose additional checks on return +// conversions, labels, defer, etc. +func createBindingDecl(logf logger, caller *Caller, args []*argument, calleeDecl *ast.FuncDecl, results []*paramInfo) *bindingDeclInfo { + // Spread calls are tricky as they may not align with the + // parameters' field groupings nor types. + // For example, given + // func g() (int, string) + // the call + // f(g()) + // is legal with these decls of f: + // func f(int, string) + // func f(x, y any) + // func f(x, y ...any) + // TODO(adonovan): support binding decls for spread calls by + // splitting parameter groupings as needed. 
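A concrete, hypothetical instance of the schema from the doc comment above, showing the block a successful binding decl produces (all names invented):

```go
package p

func f(p0, p1 string, p2 int) { println(p0, p1, p2) }

func caller(a0, a1 string) {
	// The call f(a0, a1, 42) reduces to a block whose binding decl
	// assigns arguments to parameters, spec by spec:
	{
		var (
			p0, p1 string = a0, a1
			p2     int    = 42
		)
		println(p0, p1, p2)
	}
}
```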
+ if lastArg := last(args); lastArg != nil && lastArg.spread { + logf("binding decls not yet supported for spread calls") + return nil + } + + var ( + specs []ast.Spec + names = make(map[string]bool) // names defined by previous specs + ) + // shadow reports whether any name referenced by spec is + // shadowed by a name declared by a previous spec (since, + // unlike parameters, each spec of a var decl is within the + // scope of the previous specs). + shadow := func(spec *ast.ValueSpec) bool { + // Compute union of free names of type and values + // and detect shadowing. Values is the arguments + // (caller syntax), so we can use type info. + // But Type is the untyped callee syntax, + // so we have to use a syntax-only algorithm. + free := make(map[string]bool) + for _, value := range spec.Values { + for name := range freeVars(caller.Info, value) { + free[name] = true + } + } + const includeComplitIdents = true + freeishNames(free, spec.Type, includeComplitIdents) + for name := range free { + if names[name] { + logf("binding decl would shadow free name %q", name) + return true + } + } + for _, id := range spec.Names { + if id.Name != "_" { + names[id.Name] = true + } + } + return false + } + + // parameters + // + // Bind parameters that were not eliminated through + // substitution. (Non-nil arguments correspond to the + // remaining parameters in calleeDecl.) + var values []ast.Expr + for _, arg := range args { + if arg != nil { + values = append(values, arg.expr) + } + } + for _, field := range calleeDecl.Type.Params.List { + // Each field (param group) becomes a ValueSpec. + spec := &ast.ValueSpec{ + Names: cleanNodes(field.Names), + Type: cleanNode(field.Type), + Values: values[:len(field.Names)], + } + values = values[len(field.Names):] + if shadow(spec) { + return nil + } + specs = append(specs, spec) + } + assert(len(values) == 0, "args/params mismatch") + + // results + // + // Add specs to declare any named result + // variables that are referenced by the body. + if calleeDecl.Type.Results != nil { + resultIdx := 0 + for _, field := range calleeDecl.Type.Results.List { + if field.Names == nil { + resultIdx++ + continue // unnamed field + } + var names []*ast.Ident + for _, id := range field.Names { + if len(results[resultIdx].Refs) > 0 { + names = append(names, id) + } + resultIdx++ + } + if len(names) > 0 { + spec := &ast.ValueSpec{ + Names: cleanNodes(names), + Type: cleanNode(field.Type), + } + if shadow(spec) { + return nil + } + specs = append(specs, spec) + } + } + } + + if len(specs) == 0 { + logf("binding decl not needed: all parameters substituted") + return nil + } + + stmt := &ast.DeclStmt{ + Decl: &ast.GenDecl{ + Tok: token.VAR, + Specs: specs, + }, + } + logf("binding decl: %s", debugFormatNode(caller.Fset, stmt)) + return &bindingDeclInfo{names: names, stmt: stmt} +} + +// lookup does a symbol lookup in the lexical environment of the caller. +func (caller *Caller) lookup(name string) types.Object { + pos := caller.Call.Pos() + for _, n := range caller.path { + if scope := scopeFor(caller.Info, n); scope != nil { + if _, obj := scope.LookupParent(name, pos); obj != nil { + return obj + } + } + } + return nil +} + +func scopeFor(info *types.Info, n ast.Node) *types.Scope { + // The function body scope (containing not just params) + // is associated with the function's type, not body. 
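This quirk of go/types can be checked directly. A self-contained sketch (package `p` and function `f` are invented): the scope that holds the parameters and the body's top-level declarations hangs off the *ast.FuncType, and the body's *ast.BlockStmt gets no scope of its own.

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p; func f(x int) { y := x; _ = y }`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	info := &types.Info{Scopes: make(map[ast.Node]*types.Scope)}
	if _, err := (&types.Config{}).Check("p", fset, []*ast.File{file}, info); err != nil {
		panic(err)
	}
	fn := file.Decls[0].(*ast.FuncDecl)
	fmt.Println(info.Scopes[fn.Type] != nil) // true: the scope is on the FuncType
	fmt.Println(info.Scopes[fn.Body] != nil) // false: the body shares that scope
}
```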
+ switch fn := n.(type) { + case *ast.FuncDecl: + n = fn.Type + case *ast.FuncLit: + n = fn.Type + } + return info.Scopes[n] +} + +// -- predicates over expressions -- + +// freeVars returns the names of all free identifiers of e: +// those lexically referenced by it but not defined within it. +// (Fields and methods are not included.) +func freeVars(info *types.Info, e ast.Expr) map[string]bool { + free := make(map[string]bool) + ast.Inspect(e, func(n ast.Node) bool { + if id, ok := n.(*ast.Ident); ok { + // The isField check is so that we don't treat T{f: 0} as a ref to f. + if obj, ok := info.Uses[id]; ok && !within(obj.Pos(), e) && !isField(obj) { + free[obj.Name()] = true + } + } + return true + }) + return free +} + +// effects reports whether an expression might change the state of the +// program (through function calls and channel receives) and affect +// the evaluation of subsequent expressions. +func (st *state) effects(info *types.Info, expr ast.Expr) bool { + effects := false + ast.Inspect(expr, func(n ast.Node) bool { + switch n := n.(type) { + case *ast.FuncLit: + return false // prune descent + + case *ast.CallExpr: + if info.Types[n.Fun].IsType() { + // A conversion T(x) has only the effect of its operand. + } else if !typesinternal.CallsPureBuiltin(info, n) { + // A handful of built-ins have no effect + // beyond those of their arguments. + // All other calls (including append, copy, recover) + // have unknown effects. + // + // As with 'pure', there is room for + // improvement by inspecting the callee. + effects = true + } + + case *ast.UnaryExpr: + if n.Op == token.ARROW { // <-ch + effects = true + } + } + return true + }) + + // Even if consideration of effects is not desired, + // we continue to compute, log, and discard them. + if st.opts.IgnoreEffects && effects { + effects = false + st.opts.Logf("ignoring potential effects of argument %s", + debugFormatNode(st.caller.Fset, expr)) + } + + return effects +} + +// pure reports whether an expression has the same result no matter +// when it is executed relative to other expressions, so it can be +// commuted with any other expression or statement without changing +// its meaning. +// +// An expression is considered impure if it reads the contents of any +// variable, with the exception of "single assignment" local variables +// (as classified by the provided callback), which are never updated +// after their initialization. +// +// Pure does not imply duplicable: for example, new(T) and T{} are +// pure expressions but both return a different value each time they +// are evaluated, so they are not safe to duplicate. +// +// Purity does not imply freedom from run-time panics. We assume that +// target programs do not encounter run-time panics nor depend on them +// for correct operation. +// +// TODO(adonovan): add unit tests of this function. +func pure(info *types.Info, assign1 func(*types.Var) bool, e ast.Expr) bool { + var pure func(e ast.Expr) bool + pure = func(e ast.Expr) bool { + switch e := e.(type) { + case *ast.ParenExpr: + return pure(e.X) + + case *ast.Ident: + if v, ok := info.Uses[e].(*types.Var); ok { + // In general variables are impure + // as they may be updated, but + // single-assignment local variables + // never change value. + // + // We assume all package-level variables + // may be updated, but for non-exported + // ones we could do better by analyzing + // the complete package. + return !isPkgLevel(v) && assign1(v) + } + + // All other kinds of reference are pure. 
+ return true + + case *ast.FuncLit: + // A function literal may allocate a closure that + // references mutable variables, but mutation + // cannot be observed without calling the function, + // and calls are considered impure. + return true + + case *ast.BasicLit: + return true + + case *ast.UnaryExpr: // + - ! ^ & but not <- + return e.Op != token.ARROW && pure(e.X) + + case *ast.BinaryExpr: // arithmetic, shifts, comparisons, &&/|| + return pure(e.X) && pure(e.Y) + + case *ast.CallExpr: + // A conversion is as pure as its operand. + if info.Types[e.Fun].IsType() { + return pure(e.Args[0]) + } + + // Calls to some built-ins are as pure as their arguments. + if typesinternal.CallsPureBuiltin(info, e) { + for _, arg := range e.Args { + if !pure(arg) { + return false + } + } + return true + } + + // All other calls are impure, so we can + // reject them without even looking at e.Fun. + // + // More sophisticated analysis could infer purity in + // commonly used functions such as strings.Contains; + // perhaps we could offer the client a hook so that + // go/analysis-based implementation could exploit the + // results of a purity analysis. But that would make + // the inliner's choices harder to explain. + return false + + case *ast.CompositeLit: + // T{...} is as pure as its elements. + for _, elt := range e.Elts { + if kv, ok := elt.(*ast.KeyValueExpr); ok { + if !pure(kv.Value) { + return false + } + if id, ok := kv.Key.(*ast.Ident); ok { + if v, ok := info.Uses[id].(*types.Var); ok && v.IsField() { + continue // struct {field: value} + } + } + // map/slice/array {key: value} + if !pure(kv.Key) { + return false + } + + } else if !pure(elt) { + return false + } + } + return true + + case *ast.SelectorExpr: + if seln, ok := info.Selections[e]; ok { + // See types.SelectionKind for background. + switch seln.Kind() { + case types.MethodExpr: + // A method expression T.f acts like a + // reference to a func decl, so it is pure. + return true + + case types.MethodVal, types.FieldVal: + // A field or method selection x.f is pure + // if x is pure and the selection does + // not indirect a pointer. + return !indirectSelection(seln) && pure(e.X) + + default: + panic(seln) + } + } else { + // A qualified identifier is + // treated like an unqualified one. + return pure(e.Sel) + } + + case *ast.StarExpr: + return false // *ptr depends on the state of the heap + + default: + return false + } + } + return pure(e) +} + +// duplicable reports whether it is appropriate for the expression to +// be freely duplicated. +// +// Given the declaration +// +// func f(x T) T { return x + g() + x } +// +// an argument y is considered duplicable if we would wish to see a +// call f(y) simplified to y+g()+y. This is true for identifiers, +// integer literals, unary negation, and selectors x.f where x is not +// a pointer. But we would not wish to duplicate expressions that: +// - have side effects (e.g. nearly all calls), +// - are not referentially transparent (e.g. &T{}, ptr.field, *ptr), or +// - are long (e.g. "huge string literal"). +func duplicable(info *types.Info, e ast.Expr) bool { + switch e := e.(type) { + case *ast.ParenExpr: + return duplicable(info, e.X) + + case *ast.Ident: + return true + + case *ast.BasicLit: + v := info.Types[e].Value + switch e.Kind { + case token.INT: + return true // any int + case token.STRING: + return consteq(v, kZeroString) // only "" + case token.FLOAT: + return consteq(v, kZeroFloat) || consteq(v, kOneFloat) // only 0.0 or 1.0 + } + + case *ast.UnaryExpr: // e.g. 
+1, -1 + return (e.Op == token.ADD || e.Op == token.SUB) && duplicable(info, e.X) + + case *ast.CompositeLit: + // Empty struct or array literals T{} are duplicable. + // (Non-empty literals are too verbose, and slice/map + // literals allocate indirect variables.) + if len(e.Elts) == 0 { + switch info.TypeOf(e).Underlying().(type) { + case *types.Struct, *types.Array: + return true + } + } + return false + + case *ast.CallExpr: + // Treat type conversions as duplicable if they do not observably allocate. + // The only cases of observable allocations are + // the `[]byte(string)` and `[]rune(string)` conversions. + // + // Duplicating string([]byte) conversions increases + // allocation but doesn't change behavior, but the + // reverse, []byte(string), allocates a distinct array, + // which is observable. + + if !info.Types[e.Fun].IsType() { // check whether e.Fun is a type conversion + return false + } + + fun := info.TypeOf(e.Fun) + arg := info.TypeOf(e.Args[0]) + + switch fun := fun.Underlying().(type) { + case *types.Slice: + // Do not mark []byte(string) and []rune(string) as duplicable. + elem, ok := fun.Elem().Underlying().(*types.Basic) + if ok && (elem.Kind() == types.Rune || elem.Kind() == types.Byte) { + from, ok := arg.Underlying().(*types.Basic) + isString := ok && from.Info()&types.IsString != 0 + return !isString + } + case *types.TypeParam: + return false // be conservative + } + return true + + case *ast.SelectorExpr: + if seln, ok := info.Selections[e]; ok { + // A field or method selection x.f is referentially + // transparent if it does not indirect a pointer. + return !indirectSelection(seln) + } + // A qualified identifier pkg.Name is referentially transparent. + return true + } + return false +} + +func consteq(x, y constant.Value) bool { + return constant.Compare(x, token.EQL, y) +} + +var ( + kZeroInt = constant.MakeInt64(0) + kZeroString = constant.MakeString("") + kZeroFloat = constant.MakeFloat64(0.0) + kOneFloat = constant.MakeFloat64(1.0) +) + +// -- inline helpers -- + +func assert(cond bool, msg string) { + if !cond { + panic(msg) + } +} + +// blanks returns a slice of n > 0 blank identifiers. +func blanks[E ast.Expr](n int) []E { + if n == 0 { + panic("blanks(0)") + } + res := make([]E, n) + for i := range res { + res[i] = ast.Expr(makeIdent("_")).(E) // ugh + } + return res +} + +func makeIdent(name string) *ast.Ident { + return &ast.Ident{Name: name} +} + +// importedPkgName returns the PkgName object declared by an ImportSpec. +// TODO(adonovan): make this a method of types.Info (#62037). +func importedPkgName(info *types.Info, imp *ast.ImportSpec) (*types.PkgName, bool) { + var obj types.Object + if imp.Name != nil { + obj = info.Defs[imp.Name] + } else { + obj = info.Implicits[imp] + } + pkgname, ok := obj.(*types.PkgName) + return pkgname, ok +} + +func isPkgLevel(obj types.Object) bool { + // TODO(adonovan): consider using the simpler obj.Parent() == + // obj.Pkg().Scope() instead. But be sure to test carefully + // with instantiations of generics. + return obj.Pkg().Scope().Lookup(obj.Name()) == obj +} + +// callContext returns the two nodes immediately enclosing the call +// (specified as a PathEnclosingInterval), ignoring parens. 
+func callContext(callPath []ast.Node) (parent, grandparent ast.Node) { + _ = callPath[0].(*ast.CallExpr) // sanity check + for _, n := range callPath[1:] { + if !is[*ast.ParenExpr](n) { + if parent == nil { + parent = n + } else { + return parent, n + } + } + } + return parent, nil +} + +// hasLabelConflict reports whether the set of labels of the function +// enclosing the call (specified as a PathEnclosingInterval) +// intersects with the set of callee labels. +func hasLabelConflict(callPath []ast.Node, calleeLabels []string) bool { + labels := callerLabels(callPath) + for _, label := range calleeLabels { + if labels[label] { + return true // conflict + } + } + return false +} + +// callerLabels returns the set of control labels in the function (if +// any) enclosing the call (specified as a PathEnclosingInterval). +func callerLabels(callPath []ast.Node) map[string]bool { + var callerBody *ast.BlockStmt + switch f := callerFunc(callPath).(type) { + case *ast.FuncDecl: + callerBody = f.Body + case *ast.FuncLit: + callerBody = f.Body + } + var labels map[string]bool + if callerBody != nil { + ast.Inspect(callerBody, func(n ast.Node) bool { + switch n := n.(type) { + case *ast.FuncLit: + return false // prune traversal + case *ast.LabeledStmt: + if labels == nil { + labels = make(map[string]bool) + } + labels[n.Label.Name] = true + } + return true + }) + } + return labels +} + +// callerFunc returns the innermost Func{Decl,Lit} node enclosing the +// call (specified as a PathEnclosingInterval). +func callerFunc(callPath []ast.Node) ast.Node { + _ = callPath[0].(*ast.CallExpr) // sanity check + for _, n := range callPath[1:] { + if is[*ast.FuncDecl](n) || is[*ast.FuncLit](n) { + return n + } + } + return nil +} + +// callStmt reports whether the function call (specified +// as a PathEnclosingInterval) appears within an ExprStmt, +// and returns it if so. +// +// If unrestricted, callStmt returns nil if the ExprStmt f() appears +// in a restricted context (such as "if f(); cond {") where it cannot +// be replaced by an arbitrary statement. (See "statement theory".) +func callStmt(callPath []ast.Node, unrestricted bool) *ast.ExprStmt { + parent, _ := callContext(callPath) + stmt, ok := parent.(*ast.ExprStmt) + if ok && unrestricted { + switch callPath[slices.Index(callPath, ast.Node(stmt))+1].(type) { + case *ast.LabeledStmt, + *ast.BlockStmt, + *ast.CaseClause, + *ast.CommClause: + // unrestricted + default: + // TODO(adonovan): handle restricted + // XYZStmt.Init contexts (but not ForStmt.Post) + // by creating a block around the if/for/switch: + // "if f(); cond {" -> "{ stmts; if cond {" + + return nil // restricted + } + } + return stmt +} + +// Statement theory +// +// These are all the places a statement may appear in the AST: +// +// LabeledStmt.Stmt Stmt -- any +// BlockStmt.List []Stmt -- any (but see switch/select) +// IfStmt.Init Stmt? -- simple +// IfStmt.Body BlockStmt +// IfStmt.Else Stmt? -- IfStmt or BlockStmt +// CaseClause.Body []Stmt -- any +// SwitchStmt.Init Stmt? -- simple +// SwitchStmt.Body BlockStmt -- CaseClauses only +// TypeSwitchStmt.Init Stmt? -- simple +// TypeSwitchStmt.Assign Stmt -- AssignStmt(TypeAssertExpr) or ExprStmt(TypeAssertExpr) +// TypeSwitchStmt.Body BlockStmt -- CaseClauses only +// CommClause.Comm Stmt? -- SendStmt or ExprStmt(UnaryExpr) or AssignStmt(UnaryExpr) +// CommClause.Body []Stmt -- any +// SelectStmt.Body BlockStmt -- CommClauses only +// ForStmt.Init Stmt? -- simple +// ForStmt.Post Stmt? 
-- simple
+// ForStmt.Body         BlockStmt
+// RangeStmt.Body       BlockStmt
+//
+// simple = AssignStmt | SendStmt | IncDecStmt | ExprStmt.
+//
+// A BlockStmt cannot replace an ExprStmt in
+// {If,Switch,TypeSwitch}Stmt.Init or ForStmt.Post.
+// That is allowed only within:
+//	LabeledStmt.Stmt    Stmt
+//	BlockStmt.List      []Stmt
+//	CaseClause.Body     []Stmt
+//	CommClause.Body     []Stmt
+
+// replaceNode performs a destructive update of the tree rooted at
+// root, replacing each occurrence of "from" with "to". If to is nil and
+// the element is within a slice, the slice element is removed.
+//
+// The root itself cannot be replaced; an attempt will panic.
+//
+// This function must not be called on the caller's syntax tree.
+//
+// TODO(adonovan): polish this up and move it to astutil package.
+// TODO(adonovan): needs a unit test.
+func replaceNode(root ast.Node, from, to ast.Node) {
+	if from == nil {
+		panic("from == nil")
+	}
+	if reflect.ValueOf(from).IsNil() {
+		panic(fmt.Sprintf("from == (%T)(nil)", from))
+	}
+	if from == root {
+		panic("from == root")
+	}
+	found := false
+	var parent reflect.Value // parent variable of interface type, containing a pointer
+	var visit func(reflect.Value)
+	visit = func(v reflect.Value) {
+		switch v.Kind() {
+		case reflect.Pointer:
+			if v.Interface() == from {
+				found = true
+
+				// If v is a struct field or array element
+				// (e.g. Field.Comment or Field.Names[i])
+				// then it is addressable (a pointer variable).
+				//
+				// But if it was the value of an interface
+				// (e.g. *ast.Ident within ast.Node)
+				// then it is non-addressable, and we need
+				// to set the enclosing interface (parent).
+				if !v.CanAddr() {
+					v = parent
+				}
+
+				// to=nil => use zero value
+				var toV reflect.Value
+				if to != nil {
+					toV = reflect.ValueOf(to)
+				} else {
+					toV = reflect.Zero(v.Type()) // e.g. ast.Expr(nil)
+				}
+				v.Set(toV)
+
+			} else if !v.IsNil() {
+				switch v.Interface().(type) {
+				case *ast.Object, *ast.Scope:
+					// Skip fields of types potentially involved in cycles.
+				default:
+					visit(v.Elem())
+				}
+			}
+
+		case reflect.Struct:
+			for i := range v.Type().NumField() {
+				visit(v.Field(i))
+			}
+
+		case reflect.Slice:
+			compact := false
+			for i := range v.Len() {
+				visit(v.Index(i))
+				if v.Index(i).IsNil() {
+					compact = true
+				}
+			}
+			if compact {
+				// Elements were deleted. Eliminate nils.
+				// (Do this in a second pass to avoid
+				// unnecessary writes in the common case.)
+				j := 0
+				for i := range v.Len() {
+					if !v.Index(i).IsNil() {
+						v.Index(j).Set(v.Index(i))
+						j++
+					}
+				}
+				v.SetLen(j)
+			}
+		case reflect.Interface:
+			parent = v
+			visit(v.Elem())
+
+		case reflect.Array, reflect.Chan, reflect.Func, reflect.Map, reflect.UnsafePointer:
+			panic(v) // unreachable in AST
+		default:
+			// bool, string, number: nop
+		}
+		parent = reflect.Value{}
+	}
+	visit(reflect.ValueOf(root))
+	if !found {
+		panic(fmt.Sprintf("%T not found", from))
+	}
+}
+
+// cleanNode returns a clone of node with positions cleared.
+//
+// It should be used for any callee nodes that are formatted using the caller
+// file set.
+func cleanNode[T ast.Node](node T) T {
+	clone := internalastutil.CloneNode(node)
+	clearPositions(clone)
+	return clone
+}
+
+func cleanNodes[T ast.Node](nodes []T) []T {
+	var clean []T
+	for _, node := range nodes {
+		clean = append(clean, cleanNode(node))
+	}
+	return clean
+}
+
+// clearPositions destroys token.Pos information within the tree rooted at root,
+// as positions in callee trees may cause caller comments to be emitted prematurely.
+//
+// In general it isn't safe to clear a valid Pos because some of them
+// (e.g. CallExpr.Ellipsis, TypeSpec.Assign) are significant to
+// go/printer, so this function sets each non-zero Pos to 1, which
+// suffices to avoid advancing the printer's comment cursor.
+//
+// This function mutates its argument; do not invoke on caller syntax.
+//
+// TODO(adonovan): remove this horrendous workaround when #20744 is finally fixed.
+func clearPositions(root ast.Node) {
+	posType := reflect.TypeFor[token.Pos]()
+	ast.Inspect(root, func(n ast.Node) bool {
+		if n != nil {
+			v := reflect.ValueOf(n).Elem() // deref the pointer to struct
+			fields := v.Type().NumField()
+			for i := range fields {
+				f := v.Field(i)
+				// Clearing Pos arbitrarily is destructive,
+				// as its presence may be semantically significant
+				// (e.g. CallExpr.Ellipsis, TypeSpec.Assign)
+				// or affect formatting preferences (e.g. GenDecl.Lparen).
+				//
+				// Note: for proper formatting, it may be necessary to be selective
+				// about which positions we set to 1 vs which we set to token.NoPos.
+				// (e.g. we can set most to token.NoPos, save the few that are
+				// significant).
+				if f.Type() == posType {
+					if f.Interface() != token.NoPos {
+						f.Set(reflect.ValueOf(token.Pos(1)))
+					}
+				}
+			}
+		}
+		return true
+	})
+}
+
+// findIdent finds the Ident beneath root that has the given pos.
+// It returns the path to the ident (excluding the ident), and the ident
+// itself, where the path is the sequence of ast.Nodes encountered in a
+// depth-first search to find ident.
+func findIdent(root ast.Node, pos token.Pos) ([]ast.Node, *ast.Ident) {
+	// TODO(adonovan): opt: skip subtrees that don't contain pos.
+	var (
+		path  []ast.Node
+		found *ast.Ident
+	)
+	ast.Inspect(root, func(n ast.Node) bool {
+		if found != nil {
+			return false
+		}
+		if n == nil {
+			path = path[:len(path)-1]
+			return false
+		}
+		if id, ok := n.(*ast.Ident); ok {
+			if id.Pos() == pos {
+				found = id
+				return true
+			}
+		}
+		path = append(path, n)
+		return true
+	})
+	if found == nil {
+		panic(fmt.Sprintf("findIdent %d not found in %s",
+			pos, debugFormatNode(token.NewFileSet(), root)))
+	}
+	return path, found
+}
+
+func prepend[T any](elem T, slice ...T) []T {
+	return append([]T{elem}, slice...)
+}
+
+// debugFormatNode formats a node or returns a formatting error.
+// Its sloppy treatment of errors is appropriate only for logging.
+func debugFormatNode(fset *token.FileSet, n ast.Node) string {
+	var out strings.Builder
+	if err := format.Node(&out, fset, n); err != nil {
+		out.WriteString(err.Error())
+	}
+	return out.String()
+}
+
+func shallowCopy[T any](ptr *T) *T {
+	copy := *ptr
+	return &copy
+}
+
+// ∀
+func forall[T any](list []T, f func(i int, x T) bool) bool {
+	for i, x := range list {
+		if !f(i, x) {
+			return false
+		}
+	}
+	return true
+}
+
+// ∃
+func exists[T any](list []T, f func(i int, x T) bool) bool {
+	for i, x := range list {
+		if f(i, x) {
+			return true
+		}
+	}
+	return false
+}
+
+// last returns the last element of a slice, or zero if empty.
+func last[T any](slice []T) T {
+	n := len(slice)
+	if n > 0 {
+		return slice[n-1]
+	}
+	return *new(T)
+}
+
+// consistentOffsets reports whether the portion of caller.Content
+// that corresponds to caller.Call can be parsed as a call expression.
+// If not, the client has provided inconsistent information, possibly
+// because they forgot to ignore line directives when computing the
+// filename enclosing the call.
+// This is just a heuristic.
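+//
+// Illustrative sketch (hypothetical content, not from this change): if
+// caller.Content holds "package p; var _ = f(x)" and caller.Call spans
+// the bytes of "f(x)", that substring parses as a *ast.CallExpr, so the
+// offsets are considered consistent; stale offsets caused by ignored
+// line directives would typically fail to parse as a call.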
+func consistentOffsets(caller *Caller) bool {
+	start := offsetOf(caller.Fset, caller.Call.Pos())
+	end := offsetOf(caller.Fset, caller.Call.End())
+	if !(0 < start && start < end && end <= len(caller.Content)) {
+		return false
+	}
+	expr, err := parser.ParseExpr(string(caller.Content[start:end]))
+	if err != nil {
+		return false
+	}
+	return is[*ast.CallExpr](expr)
+}
+
+// needsParens reports whether parens are required to avoid ambiguity
+// around the new node replacing the specified old node (which is some
+// ancestor of the CallExpr identified by its PathEnclosingInterval).
+func needsParens(callPath []ast.Node, old, new ast.Node) bool {
+	// Find enclosing old node and its parent.
+	i := slices.Index(callPath, old)
+	if i == -1 {
+		panic("not found")
+	}
+
+	// There is no precedence ambiguity when replacing
+	// (e.g.) a statement enclosing the call.
+	if !is[ast.Expr](old) {
+		return false
+	}
+
+	// An expression beneath a non-expression
+	// has no precedence ambiguity.
+	parent, ok := callPath[i+1].(ast.Expr)
+	if !ok {
+		return false
+	}
+
+	precedence := func(n ast.Node) int {
+		switch n := n.(type) {
+		case *ast.UnaryExpr, *ast.StarExpr:
+			return token.UnaryPrec
+		case *ast.BinaryExpr:
+			return n.Op.Precedence()
+		}
+		return -1
+	}
+
+	// Parens are not required if the new node
+	// is not unary or binary.
+	newprec := precedence(new)
+	if newprec < 0 {
+		return false
+	}
+
+	// Parens are required if parent and child are both
+	// unary or binary and the parent has higher precedence.
+	if precedence(parent) > newprec {
+		return true
+	}
+
+	// Was the old node the operand of a postfix operator?
+	//	f().sel
+	//	f()[i:j]
+	//	f()[i]
+	//	f().(T)
+	//	f()(x)
+	switch parent := parent.(type) {
+	case *ast.SelectorExpr:
+		return parent.X == old
+	case *ast.IndexExpr:
+		return parent.X == old
+	case *ast.SliceExpr:
+		return parent.X == old
+	case *ast.TypeAssertExpr:
+		return parent.X == old
+	case *ast.CallExpr:
+		return parent.Fun == old
+	}
+	return false
+}
+
+// declares returns the set of lexical names declared by a
+// sequence of statements from the same block, excluding sub-blocks.
+// (Lexical names do not include control labels.)
+func declares(stmts []ast.Stmt) map[string]bool {
+	names := make(map[string]bool)
+	for _, stmt := range stmts {
+		switch stmt := stmt.(type) {
+		case *ast.DeclStmt:
+			for _, spec := range stmt.Decl.(*ast.GenDecl).Specs {
+				switch spec := spec.(type) {
+				case *ast.ValueSpec:
+					for _, id := range spec.Names {
+						names[id.Name] = true
+					}
+				case *ast.TypeSpec:
+					names[spec.Name.Name] = true
+				}
+			}
+
+		case *ast.AssignStmt:
+			if stmt.Tok == token.DEFINE {
+				for _, lhs := range stmt.Lhs {
+					names[lhs.(*ast.Ident).Name] = true
+				}
+			}
+		}
+	}
+	delete(names, "_")
+	return names
+}
+
+// An importNameFunc is used to query local import names in the caller, in a
+// particular shadowing context.
+//
+// The shadow map contains additional names shadowed in the inlined code, at
+// the position the local import name is to be used. The shadow map only needs
+// to contain newly introduced names in the inlined code; names shadowed at the
+// caller are handled automatically.
+type importNameFunc = func(pkgPath string, shadow shadowMap) string
+
+// assignStmts rewrites a statement assigning the results of a call into zero
+// or more statements that assign its return operands, or (nil, false) if no
+// such rewrite is possible. The set of bindings created by the result of
+// assignStmts is the same as the set of bindings created by the callerStmt.
+//
+// The callee must contain exactly one return statement.
+//
+// This is (once again) a surprisingly complex task. For example, depending on
+// types and existing bindings, the assignment
+//
+//	a, b := f()
+//
+// could be rewritten as:
+//
+//	a, b := 1, 2
+//
+// but may need to be written as:
+//
+//	a, b := int8(1), int32(2)
+//
+// In the case where the return statement within f is a spread call to another
+// function g(), we cannot explicitly convert the return values inline, and so
+// it may be necessary to split the declaration and assignment of variables
+// into separate statements:
+//
+//	a, b := g()
+//
+// or
+//
+//	var a int32
+//	a, b = g()
+//
+// or
+//
+//	var (
+//		a int8
+//		b int32
+//	)
+//	a, b = g()
+//
+// Note: assignStmts may return (nil, true) if it determines that the rewritten
+// assignment consists only of _ = nil assignments.
+func (st *state) assignStmts(callerStmt *ast.AssignStmt, returnOperands []ast.Expr, importName importNameFunc) ([]ast.Stmt, bool) {
+	logf, caller, callee := st.opts.Logf, st.caller, &st.callee.impl
+
+	assert(len(callee.Returns) == 1, "unexpected multiple returns")
+	resultInfo := callee.Returns[0]
+
+	// When constructing assign statements, we need to make sure that we don't
+	// modify types on the left-hand side, such as would happen if the type of a
+	// RHS expression does not match the corresponding LHS type at the caller
+	// (due to untyped conversion or interface widening).
+	//
+	// This turns out to be remarkably tricky to handle correctly.
+	//
+	// Substrategies below are labeled as `Substrategy "<name>":`.
+
+	// Collect LHS information.
+	var (
+		lhs    []ast.Expr                                // shallow copy of the LHS slice, for mutation
+		defs   = make([]*ast.Ident, len(callerStmt.Lhs)) // indexes in lhs of defining identifiers
+		blanks = make([]bool, len(callerStmt.Lhs))       // indexes in lhs of blank identifiers
+		byType typeutil.Map                              // map of distinct types -> indexes, for writing specs later
+	)
+	for i, expr := range callerStmt.Lhs {
+		lhs = append(lhs, expr)
+		if name, ok := expr.(*ast.Ident); ok {
+			if name.Name == "_" {
+				blanks[i] = true
+				continue // no type
+			}
+
+			if obj, isDef := caller.Info.Defs[name]; isDef {
+				defs[i] = name
+				typ := obj.Type()
+				idxs, _ := byType.At(typ).([]int)
+				idxs = append(idxs, i)
+				byType.Set(typ, idxs)
+			}
+		}
+	}
+
+	// Collect RHS information
+	//
+	// The RHS is either a parallel assignment or spread assignment, but by
+	// looping over both callerStmt.Rhs and returnOperands we handle both.
+	var (
+		rhs             []ast.Expr               // new RHS of assignment, owned by the inliner
+		callIdx         = -1                     // index of the call among the original RHS
+		nilBlankAssigns = make(map[int]unit)     // indexes in rhs of _ = nil assignments, which can be deleted
+		freeNames       = make(map[string]bool)  // free(ish) names among rhs expressions
+		nonTrivial      = make(map[int]bool)     // indexes in rhs of nontrivial result conversions
+	)
+	const includeComplitIdents = true
+
+	for i, expr := range callerStmt.Rhs {
+		if expr == caller.Call {
+			assert(callIdx == -1, "malformed (duplicative) AST")
+			callIdx = i
+			for j, returnOperand := range returnOperands {
+				freeishNames(freeNames, returnOperand, includeComplitIdents)
+				rhs = append(rhs, returnOperand)
+				if resultInfo[j]&nonTrivialResult != 0 {
+					nonTrivial[i+j] = true
+				}
+				if blanks[i+j] && resultInfo[j]&untypedNilResult != 0 {
+					nilBlankAssigns[i+j] = unit{}
+				}
+			}
+		} else {
+			// We must clone before clearing positions, since expr came from the caller.
+ expr = internalastutil.CloneNode(expr) + clearPositions(expr) + freeishNames(freeNames, expr, includeComplitIdents) + rhs = append(rhs, expr) + } + } + assert(callIdx >= 0, "failed to find call in RHS") + + // Substrategy "splice": Check to see if we can simply splice in the result + // expressions from the callee, such as simplifying + // + // x, y := f() + // + // to + // + // x, y := e1, e2 + // + // where the types of x and y match the types of e1 and e2. + // + // This works as long as we don't need to write any additional type + // information. + if len(nonTrivial) == 0 { // no non-trivial conversions to worry about + + logf("substrategy: splice assignment") + return []ast.Stmt{&ast.AssignStmt{ + Lhs: lhs, + Tok: callerStmt.Tok, + TokPos: callerStmt.TokPos, + Rhs: rhs, + }}, true + } + + // Inlining techniques below will need to write type information in order to + // preserve the correct types of LHS identifiers. + // + // typeExpr is a simple helper to write out type expressions. It currently + // handles (possibly qualified) type names. + // + // TODO(rfindley): + // 1. expand this to handle more type expressions. + // 2. refactor to share logic with callee rewriting. + universeAny := types.Universe.Lookup("any") + typeExpr := func(typ types.Type, shadow shadowMap) ast.Expr { + var ( + typeName string + obj *types.TypeName // nil for basic types + ) + if tname := typesinternal.TypeNameFor(typ); tname != nil { + obj = tname + typeName = tname.Name() + } + + // Special case: check for universe "any". + // TODO(golang/go#66921): this may become unnecessary if any becomes a proper alias. + if typ == universeAny.Type() { + typeName = "any" + } + + if typeName == "" { + return nil + } + + if obj == nil || obj.Pkg() == nil || obj.Pkg() == caller.Types { // local type or builtin + if shadow[typeName] != 0 { + logf("cannot write shadowed type name %q", typeName) + return nil + } + obj, _ := caller.lookup(typeName).(*types.TypeName) + if obj != nil && types.Identical(obj.Type(), typ) { + return ast.NewIdent(typeName) + } + } else if pkgName := importName(obj.Pkg().Path(), shadow); pkgName != "" { + return &ast.SelectorExpr{ + X: ast.NewIdent(pkgName), + Sel: ast.NewIdent(typeName), + } + } + return nil + } + + // Substrategy "spread": in the case of a spread call (func f() (T1, T2) return + // g()), since we didn't hit the 'splice' substrategy, there must be some + // non-declaring expression on the LHS. Simplify this by pre-declaring + // variables, rewriting + // + // x, y := f() + // + // to + // + // var x int + // x, y = g() + // + // Which works as long as the predeclared variables do not overlap with free + // names on the RHS. + if len(rhs) != len(lhs) { + assert(len(rhs) == 1 && len(returnOperands) == 1, "expected spread call") + + for _, id := range defs { + if id != nil && freeNames[id.Name] { + // By predeclaring variables, we're changing them to be in scope of the + // RHS. We can't do this if their names are free on the RHS. + return nil, false + } + } + + // Write out the specs, being careful to avoid shadowing free names in + // their type expressions. 
+	var (
+		specs    []ast.Spec
+		specIdxs []int
+		shadow   = make(shadowMap)
+	)
+	failed := false
+	byType.Iterate(func(typ types.Type, v any) {
+		if failed {
+			return
+		}
+		idxs := v.([]int)
+		specIdxs = append(specIdxs, idxs[0])
+		texpr := typeExpr(typ, shadow)
+		if texpr == nil {
+			failed = true
+			return
+		}
+		spec := &ast.ValueSpec{
+			Type: texpr,
+		}
+		for _, idx := range idxs {
+			spec.Names = append(spec.Names, ast.NewIdent(defs[idx].Name))
+		}
+		specs = append(specs, spec)
+	})
+	if failed {
+		return nil, false
+	}
+	logf("substrategy: spread assignment")
+	return []ast.Stmt{
+		&ast.DeclStmt{
+			Decl: &ast.GenDecl{
+				Tok:   token.VAR,
+				Specs: specs,
+			},
+		},
+		&ast.AssignStmt{
+			Lhs: callerStmt.Lhs,
+			Tok: token.ASSIGN,
+			Rhs: returnOperands,
+		},
+	}, true
+}
+
+	assert(len(lhs) == len(rhs), "mismatching LHS and RHS")
+
+	// Substrategy "convert": write out RHS expressions with explicit type conversions
+	// as necessary, rewriting
+	//
+	//	x, y := f()
+	//
+	// to
+	//
+	//	x, y := 1, int32(2)
+	//
+	// As required to preserve types.
+	//
+	// In the special case of _ = nil, which is disallowed by the type checker
+	// (since nil has no default type), we delete the assignment.
+	var origIdxs []int // maps back to original indexes after lhs and rhs are pruned
+	i := 0
+	for j := range lhs {
+		if _, ok := nilBlankAssigns[j]; !ok {
+			lhs[i] = lhs[j]
+			rhs[i] = rhs[j]
+			origIdxs = append(origIdxs, j)
+			i++
+		}
+	}
+	lhs = lhs[:i]
+	rhs = rhs[:i]
+
+	if len(lhs) == 0 {
+		logf("trivial assignment after pruning nil blank assigns")
+		// After pruning, we have no remaining assignments;
+		// signal success by returning (nil, true).
+		return nil, true
+	}
+
+	// Write out explicit conversions as necessary.
+	//
+	// A conversion is necessary if the LHS is being defined, and the RHS return
+	// involved a nontrivial implicit conversion.
+	for i, expr := range rhs {
+		idx := origIdxs[i]
+		if nonTrivial[idx] && defs[idx] != nil {
+			typ := caller.Info.TypeOf(lhs[i])
+			texpr := typeExpr(typ, nil)
+			if texpr == nil {
+				return nil, false
+			}
+			if _, ok := texpr.(*ast.StarExpr); ok {
+				// TODO(rfindley): is this necessary? Doesn't the formatter add these parens?
+				texpr = &ast.ParenExpr{X: texpr} // *T -> (*T) so that (*T)(x) is valid
+			}
+			rhs[i] = &ast.CallExpr{
+				Fun:  texpr,
+				Args: []ast.Expr{expr},
+			}
+		}
+	}
+	logf("substrategy: convert assignment")
+	return []ast.Stmt{&ast.AssignStmt{
+		Lhs: lhs,
+		Tok: callerStmt.Tok,
+		Rhs: rhs,
+	}}, true
+}
+
+// tailCallSafeReturn reports whether the callee's return statements may be safely
+// used to return from the function enclosing the caller (which must exist).
+func tailCallSafeReturn(caller *Caller, calleeSymbol *types.Func, callee *gobCallee) bool {
+	// It is safe if all callee returns involve only trivial conversions.
+	if !hasNonTrivialReturn(callee.Returns) {
+		return true
+	}
+
+	var callerType types.Type
+	// Find type of innermost function enclosing call.
+	// (Beware: Caller.enclosingFunc is the outermost.)
+loop:
+	for _, n := range caller.path {
+		switch f := n.(type) {
+		case *ast.FuncDecl:
+			callerType = caller.Info.ObjectOf(f.Name).Type()
+			break loop
+		case *ast.FuncLit:
+			callerType = caller.Info.TypeOf(f)
+			break loop
+		}
+	}
+
+	// Non-trivial return conversions in the callee are permitted
+	// if the same non-trivial conversion would occur after inlining,
+	// i.e. if the caller and callee results tuples are identical.
+ callerResults := callerType.(*types.Signature).Results() + calleeResults := calleeSymbol.Type().(*types.Signature).Results() + return types.Identical(callerResults, calleeResults) +} + +// hasNonTrivialReturn reports whether any of the returns involve a nontrivial +// implicit conversion of a result expression. +func hasNonTrivialReturn(returnInfo [][]returnOperandFlags) bool { + for _, resultInfo := range returnInfo { + for _, r := range resultInfo { + if r&nonTrivialResult != 0 { + return true + } + } + } + return false +} + +// soleUse returns the ident that refers to obj, if there is exactly one. +func soleUse(info *types.Info, obj types.Object) (sole *ast.Ident) { + // This is not efficient, but it is called infrequently. + for id, obj2 := range info.Uses { + if obj2 == obj { + if sole != nil { + return nil // not unique + } + sole = id + } + } + return sole +} + +type unit struct{} // for representing sets as maps diff --git a/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/util.go b/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/util.go new file mode 100644 index 00000000000..205e5b6aad4 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/util.go @@ -0,0 +1,168 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inline + +// This file defines various common helpers. + +import ( + "go/ast" + "go/constant" + "go/token" + "go/types" + "reflect" + "strings" + + "golang.org/x/tools/internal/typeparams" +) + +func is[T any](x any) bool { + _, ok := x.(T) + return ok +} + +func btoi(b bool) int { + if b { + return 1 + } else { + return 0 + } +} + +func offsetOf(fset *token.FileSet, pos token.Pos) int { + return fset.PositionFor(pos, false).Offset +} + +// objectKind returns an object's kind (e.g. var, func, const, typename). +func objectKind(obj types.Object) string { + return strings.TrimPrefix(strings.ToLower(reflect.TypeOf(obj).String()), "*types.") +} + +// within reports whether pos is within the half-open interval [n.Pos, n.End). +func within(pos token.Pos, n ast.Node) bool { + return n.Pos() <= pos && pos < n.End() +} + +// trivialConversion reports whether it is safe to omit the implicit +// value-to-variable conversion that occurs in argument passing or +// result return. The only case currently allowed is converting from +// untyped constant to its default type (e.g. 0 to int). +// +// The reason for this check is that converting from A to B to C may +// yield a different result than converting A directly to C: consider +// 0 to int32 to any. +// +// trivialConversion under-approximates trivial conversions, as unfortunately +// go/types does not record the type of an expression *before* it is implicitly +// converted, and therefore it cannot distinguish typed constant +// expressions from untyped constant expressions. For example, in the +// expression `c + 2`, where c is a uint32 constant, trivialConversion does not +// detect that the default type of this expression is actually uint32, not untyped +// int. +// +// We could, of course, do better here by reverse engineering some of go/types' +// constant handling. That may or may not be worthwhile. +// +// Example: in func f() int32 { return 0 }, +// the type recorded for 0 is int32, not untyped int; +// although it is Identical to the result var, +// the conversion is non-trivial. 
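+//
+// Illustrative calls (values chosen for this sketch, not in the original):
+//
+//	trivialConversion(constant.MakeInt64(0), types.Typ[types.UntypedInt], types.Typ[types.Int])   // true: int is the default type of 0
+//	trivialConversion(constant.MakeInt64(0), types.Typ[types.UntypedInt], types.Typ[types.Int32]) // false: int32 is not the default type of 0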
+func trivialConversion(fromValue constant.Value, from, to types.Type) bool { + if fromValue != nil { + var defaultType types.Type + switch fromValue.Kind() { + case constant.Bool: + defaultType = types.Typ[types.Bool] + case constant.String: + defaultType = types.Typ[types.String] + case constant.Int: + defaultType = types.Typ[types.Int] + case constant.Float: + defaultType = types.Typ[types.Float64] + case constant.Complex: + defaultType = types.Typ[types.Complex128] + default: + return false + } + return types.Identical(defaultType, to) + } + return types.Identical(from, to) +} + +func checkInfoFields(info *types.Info) { + assert(info.Defs != nil, "types.Info.Defs is nil") + assert(info.Implicits != nil, "types.Info.Implicits is nil") + assert(info.Scopes != nil, "types.Info.Scopes is nil") + assert(info.Selections != nil, "types.Info.Selections is nil") + assert(info.Types != nil, "types.Info.Types is nil") + assert(info.Uses != nil, "types.Info.Uses is nil") +} + +// intersects reports whether the maps' key sets intersect. +func intersects[K comparable, T1, T2 any](x map[K]T1, y map[K]T2) bool { + if len(x) > len(y) { + return intersects(y, x) + } + for k := range x { + if _, ok := y[k]; ok { + return true + } + } + return false +} + +// convert returns syntax for the conversion T(x). +func convert(T, x ast.Expr) *ast.CallExpr { + // The formatter generally adds parens as needed, + // but before go1.22 it had a bug (#63362) for + // channel types that requires this workaround. + if ch, ok := T.(*ast.ChanType); ok && ch.Dir == ast.RECV { + T = &ast.ParenExpr{X: T} + } + return &ast.CallExpr{ + Fun: T, + Args: []ast.Expr{x}, + } +} + +// isPointer reports whether t's core type is a pointer. +func isPointer(t types.Type) bool { + return is[*types.Pointer](typeparams.CoreType(t)) +} + +// indirectSelection is like seln.Indirect() without bug #8353. +func indirectSelection(seln *types.Selection) bool { + // Work around bug #8353 in Selection.Indirect when Kind=MethodVal. + if seln.Kind() == types.MethodVal { + tArg, indirect := effectiveReceiver(seln) + if indirect { + return true + } + + tParam := seln.Obj().Type().Underlying().(*types.Signature).Recv().Type() + return isPointer(tArg) && !isPointer(tParam) // implicit * + } + + return seln.Indirect() +} + +// effectiveReceiver returns the effective type of the method +// receiver after all implicit field selections (but not implicit * or +// & operations) have been applied. +// +// The boolean indicates whether any implicit field selection was indirect. +func effectiveReceiver(seln *types.Selection) (types.Type, bool) { + assert(seln.Kind() == types.MethodVal, "not MethodVal") + t := seln.Recv() + indices := seln.Index() + indirect := false + for _, index := range indices[:len(indices)-1] { + if isPointer(t) { + indirect = true + t = typeparams.MustDeref(t) + } + t = typeparams.CoreType(t).(*types.Struct).Field(index).Type() + } + return t, indirect +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/refactor/refactor.go b/src/cmd/vendor/golang.org/x/tools/internal/refactor/refactor.go new file mode 100644 index 00000000000..27b97508961 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/refactor/refactor.go @@ -0,0 +1,29 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package refactor provides operators to compute common textual edits +// for refactoring tools. 
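+//
+// For example (an illustrative use, assuming a scope and position
+// supplied by the caller's type information), [FreshName] picks an
+// unused name:
+//
+//	name := refactor.FreshName(scope, pos, "tmp") // "tmp", or "tmp0", "tmp1", ... if shadowed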
+// +// This package should not use features of the analysis API +// other than [analysis.TextEdit]. +package refactor + +import ( + "fmt" + "go/token" + "go/types" +) + +// FreshName returns the name of an identifier that is undefined +// at the specified position, based on the preferred name. +func FreshName(scope *types.Scope, pos token.Pos, preferred string) string { + newName := preferred + for i := 0; ; i++ { + if _, obj := scope.LookupParent(newName, pos); obj == nil { + break // fresh + } + newName = fmt.Sprintf("%s%d", preferred, i) + } + return newName +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/fx.go b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/fx.go new file mode 100644 index 00000000000..c846a53d5fe --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/fx.go @@ -0,0 +1,88 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "go/ast" + "go/token" + "go/types" +) + +// NoEffects reports whether the expression has no side effects, i.e., it +// does not modify the memory state. This function is conservative: it may +// return false even when the expression has no effect. +func NoEffects(info *types.Info, expr ast.Expr) bool { + noEffects := true + ast.Inspect(expr, func(n ast.Node) bool { + switch v := n.(type) { + case nil, *ast.Ident, *ast.BasicLit, *ast.BinaryExpr, *ast.ParenExpr, + *ast.SelectorExpr, *ast.IndexExpr, *ast.SliceExpr, *ast.TypeAssertExpr, + *ast.StarExpr, *ast.CompositeLit, + // non-expressions that may appear within expressions + *ast.KeyValueExpr, + *ast.FieldList, + *ast.Field, + *ast.Ellipsis, + *ast.IndexListExpr: + // No effect. + + case *ast.ArrayType, + *ast.StructType, + *ast.ChanType, + *ast.FuncType, + *ast.MapType, + *ast.InterfaceType: + // Type syntax: no effects, recursively. + // Prune descent. + return false + + case *ast.UnaryExpr: + // Channel send <-ch has effects. + if v.Op == token.ARROW { + noEffects = false + } + + case *ast.CallExpr: + // Type conversion has no effects. + if !info.Types[v.Fun].IsType() { + if CallsPureBuiltin(info, v) { + // A call such as len(e) has no effects of its + // own, though the subexpression e might. + } else { + noEffects = false + } + } + + case *ast.FuncLit: + // A FuncLit has no effects, but do not descend into it. + return false + + default: + // All other expressions have effects + noEffects = false + } + + return noEffects + }) + return noEffects +} + +// CallsPureBuiltin reports whether call is a call of a built-in +// function that is a pure computation over its operands (analogous to +// a + operator). Because it does not depend on program state, it may +// be evaluated at any point--though not necessarily at multiple +// points (consider new, make). 
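+//
+// Illustrative results (hypothetical calls, assuming info records their types):
+//
+//	len(s)       // true: a pure computation over its operand
+//	append(s, x) // false: append is not in the pure set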
+func CallsPureBuiltin(info *types.Info, call *ast.CallExpr) bool { + if id, ok := ast.Unparen(call.Fun).(*ast.Ident); ok { + if b, ok := info.ObjectOf(id).(*types.Builtin); ok { + switch b.Name() { + case "len", "cap", "complex", "imag", "real", "make", "new", "max", "min": + return true + } + // Not: append clear close copy delete panic print println recover + } + } + return false +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go new file mode 100644 index 00000000000..f2affec4fba --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go @@ -0,0 +1,71 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "go/types" + "slices" +) + +// IsTypeNamed reports whether t is (or is an alias for) a +// package-level defined type with the given package path and one of +// the given names. It returns false if t is nil. +// +// This function avoids allocating the concatenation of "pkg.Name", +// which is important for the performance of syntax matching. +func IsTypeNamed(t types.Type, pkgPath string, names ...string) bool { + if named, ok := types.Unalias(t).(*types.Named); ok { + tname := named.Obj() + return tname != nil && + IsPackageLevel(tname) && + tname.Pkg().Path() == pkgPath && + slices.Contains(names, tname.Name()) + } + return false +} + +// IsPointerToNamed reports whether t is (or is an alias for) a pointer to a +// package-level defined type with the given package path and one of the given +// names. It returns false if t is not a pointer type. +func IsPointerToNamed(t types.Type, pkgPath string, names ...string) bool { + r := Unpointer(t) + if r == t { + return false + } + return IsTypeNamed(r, pkgPath, names...) +} + +// IsFunctionNamed reports whether obj is a package-level function +// defined in the given package and has one of the given names. +// It returns false if obj is nil. +// +// This function avoids allocating the concatenation of "pkg.Name", +// which is important for the performance of syntax matching. +func IsFunctionNamed(obj types.Object, pkgPath string, names ...string) bool { + f, ok := obj.(*types.Func) + return ok && + IsPackageLevel(obj) && + f.Pkg().Path() == pkgPath && + f.Type().(*types.Signature).Recv() == nil && + slices.Contains(names, f.Name()) +} + +// IsMethodNamed reports whether obj is a method defined on a +// package-level type with the given package and type name, and has +// one of the given names. It returns false if obj is nil. +// +// This function avoids allocating the concatenation of "pkg.TypeName.Name", +// which is important for the performance of syntax matching. 
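+//
+// For example (an illustrative call, with obj supplied by the caller):
+//
+//	IsMethodNamed(obj, "bytes", "Buffer", "Write", "WriteString")
+//
+// reports whether obj is (*bytes.Buffer).Write or (*bytes.Buffer).WriteString.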
+func IsMethodNamed(obj types.Object, pkgPath string, typeName string, names ...string) bool {
+	if fn, ok := obj.(*types.Func); ok {
+		if recv := fn.Type().(*types.Signature).Recv(); recv != nil {
+			_, T := ReceiverNamed(recv)
+			return T != nil &&
+				IsTypeNamed(T, pkgPath, typeName) &&
+				slices.Contains(names, fn.Name())
+		}
+	}
+	return false
+}
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/types.go b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/types.go
index a5cd7e8dbfc..fef74a78560 100644
--- a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/types.go
+++ b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -2,8 +2,20 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Package typesinternal provides access to internal go/types APIs that are not
-// yet exported.
+// Package typesinternal provides helpful operators for dealing with
+// go/types:
+//
+// - operators for querying typed syntax trees (e.g. [Imports], [IsFunctionNamed]);
+// - functions for converting types to strings or syntax (e.g. [TypeExpr], [FileQualifier]);
+// - helpers for working with the [go/types] API (e.g. [NewTypesInfo]);
+// - access to internal go/types APIs that are not yet
+//   exported (e.g. [SetUsesCgo], [ErrorCodeStartEnd], [VarKind]); and
+// - common algorithms related to types (e.g. [TooNewStdSymbols]).
+//
+// See also:
+// - [golang.org/x/tools/internal/astutil], for operations on untyped syntax;
+// - [golang.org/x/tools/internal/analysisinternal], for helpers for analyzers;
+// - [golang.org/x/tools/internal/refactor], for operators to compute text edits.
 package typesinternal
 
 import (
@@ -13,6 +25,7 @@ import (
 	"reflect"
 	"unsafe"
 
+	"golang.org/x/tools/go/ast/inspector"
 	"golang.org/x/tools/internal/aliases"
 )
 
@@ -60,6 +73,9 @@ func ErrorCodeStartEnd(err types.Error) (code ErrorCode, start, end token.Pos, o
 // which is often excessive.)
 //
 // If pkg is nil, it is equivalent to [*types.Package.Name].
+//
+// TODO(adonovan): all uses of this with TypeString should be
+// eliminated when https://go.dev/issues/75604 is resolved.
 func NameRelativeTo(pkg *types.Package) types.Qualifier {
 	return func(other *types.Package) string {
 		if pkg != nil && pkg == other {
@@ -153,3 +169,31 @@ func NewTypesInfo() *types.Info {
 		FileVersions: map[*ast.File]string{},
 	}
 }
+
+// EnclosingScope returns the innermost block logically enclosing the cursor.
+func EnclosingScope(info *types.Info, cur inspector.Cursor) *types.Scope {
+	for cur := range cur.Enclosing() {
+		n := cur.Node()
+		// A function's Scope is associated with its FuncType.
+		switch f := n.(type) {
+		case *ast.FuncDecl:
+			n = f.Type
+		case *ast.FuncLit:
+			n = f.Type
+		}
+		if b := info.Scopes[n]; b != nil {
+			return b
+		}
+	}
+	panic("no Scope for *ast.File")
+}
+
+// Imports reports whether path is imported by pkg.
+func Imports(pkg *types.Package, path string) bool {
+	for _, imp := range pkg.Imports() {
+		if imp.Path() == path {
+			return true
+		}
+	}
+	return false
+}
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/varkind.go b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/varkind.go
index e5da0495111..26499cdd2e7 100644
--- a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/varkind.go
+++ b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/varkind.go
@@ -2,39 +2,22 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
-package typesinternal +//go:build go1.25 -// TODO(adonovan): when CL 645115 lands, define the go1.25 version of -// this API that actually does something. +package typesinternal import "go/types" -type VarKind uint8 +type VarKind = types.VarKind const ( - _ VarKind = iota // (not meaningful) - PackageVar // a package-level variable - LocalVar // a local variable - RecvVar // a method receiver variable - ParamVar // a function parameter variable - ResultVar // a function result variable - FieldVar // a struct field + PackageVar = types.PackageVar + LocalVar = types.LocalVar + RecvVar = types.RecvVar + ParamVar = types.ParamVar + ResultVar = types.ResultVar + FieldVar = types.FieldVar ) -func (kind VarKind) String() string { - return [...]string{ - 0: "VarKind(0)", - PackageVar: "PackageVar", - LocalVar: "LocalVar", - RecvVar: "RecvVar", - ParamVar: "ParamVar", - ResultVar: "ResultVar", - FieldVar: "FieldVar", - }[kind] -} - -// GetVarKind returns an invalid VarKind. -func GetVarKind(v *types.Var) VarKind { return 0 } - -// SetVarKind has no effect. -func SetVarKind(v *types.Var, kind VarKind) {} +func GetVarKind(v *types.Var) VarKind { return v.Kind() } +func SetVarKind(v *types.Var, kind VarKind) { v.SetKind(kind) } diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/varkind_go124.go b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/varkind_go124.go new file mode 100644 index 00000000000..17b1804b4e8 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/varkind_go124.go @@ -0,0 +1,39 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.25 + +package typesinternal + +import "go/types" + +type VarKind uint8 + +const ( + _ VarKind = iota // (not meaningful) + PackageVar // a package-level variable + LocalVar // a local variable + RecvVar // a method receiver variable + ParamVar // a function parameter variable + ResultVar // a function result variable + FieldVar // a struct field +) + +func (kind VarKind) String() string { + return [...]string{ + 0: "VarKind(0)", + PackageVar: "PackageVar", + LocalVar: "LocalVar", + RecvVar: "RecvVar", + ParamVar: "ParamVar", + ResultVar: "ResultVar", + FieldVar: "FieldVar", + }[kind] +} + +// GetVarKind returns an invalid VarKind. +func GetVarKind(v *types.Var) VarKind { return 0 } + +// SetVarKind has no effect. +func SetVarKind(v *types.Var, kind VarKind) {} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go index d272949c177..453bba2ad5e 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go @@ -204,23 +204,12 @@ func ZeroExpr(t types.Type, qual types.Qualifier) (_ ast.Expr, isValid bool) { } } -// IsZeroExpr uses simple syntactic heuristics to report whether expr -// is a obvious zero value, such as 0, "", nil, or false. -// It cannot do better without type information. -func IsZeroExpr(expr ast.Expr) bool { - switch e := expr.(type) { - case *ast.BasicLit: - return e.Value == "0" || e.Value == `""` - case *ast.Ident: - return e.Name == "nil" || e.Name == "false" - default: - return false - } -} - // TypeExpr returns syntax for the specified type. References to named types // are qualified by an appropriate (optional) qualifier function. 
// It may panic for types such as Tuple or Union. +// +// See also https://go.dev/issues/75604, which will provide a robust +// Type-to-valid-Go-syntax formatter. func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr { switch t := t.(type) { case *types.Basic: diff --git a/src/cmd/vendor/modules.txt b/src/cmd/vendor/modules.txt index 133271355f2..e92804a90d3 100644 --- a/src/cmd/vendor/modules.txt +++ b/src/cmd/vendor/modules.txt @@ -16,8 +16,8 @@ github.com/google/pprof/third_party/svgpan # github.com/ianlancetaylor/demangle v0.0.0-20250417193237-f615e6bd150b ## explicit; go 1.13 github.com/ianlancetaylor/demangle -# golang.org/x/arch v0.20.1-0.20250808194827-46ba08e3ae58 -## explicit; go 1.23.0 +# golang.org/x/arch v0.22.1-0.20251016010524-fea4a9ec4938 +## explicit; go 1.24.0 golang.org/x/arch/arm/armasm golang.org/x/arch/arm64/arm64asm golang.org/x/arch/loong64/loong64asm @@ -28,7 +28,7 @@ golang.org/x/arch/x86/x86asm # golang.org/x/build v0.0.0-20250806225920-b7c66c047964 ## explicit; go 1.23.0 golang.org/x/build/relnote -# golang.org/x/mod v0.28.0 +# golang.org/x/mod v0.29.0 ## explicit; go 1.24.0 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile @@ -43,12 +43,12 @@ golang.org/x/mod/zip ## explicit; go 1.24.0 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.36.0 +# golang.org/x/sys v0.37.0 ## explicit; go 1.24.0 golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053 +# golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 ## explicit; go 1.24.0 golang.org/x/telemetry golang.org/x/telemetry/counter @@ -63,7 +63,7 @@ golang.org/x/telemetry/internal/upload # golang.org/x/term v0.34.0 ## explicit; go 1.23.0 golang.org/x/term -# golang.org/x/text v0.29.0 +# golang.org/x/text v0.30.0 ## explicit; go 1.24.0 golang.org/x/text/cases golang.org/x/text/internal @@ -73,7 +73,7 @@ golang.org/x/text/internal/tag golang.org/x/text/language golang.org/x/text/transform golang.org/x/text/unicode/norm -# golang.org/x/tools v0.37.1-0.20250924232827-4df13e317ce4 +# golang.org/x/tools v0.38.1-0.20251015192825-7d9453ccc0f5 ## explicit; go 1.24.0 golang.org/x/tools/cmd/bisect golang.org/x/tools/cover @@ -96,10 +96,12 @@ golang.org/x/tools/go/analysis/passes/framepointer golang.org/x/tools/go/analysis/passes/hostport golang.org/x/tools/go/analysis/passes/httpresponse golang.org/x/tools/go/analysis/passes/ifaceassert +golang.org/x/tools/go/analysis/passes/inline golang.org/x/tools/go/analysis/passes/inspect -golang.org/x/tools/go/analysis/passes/internal/analysisutil +golang.org/x/tools/go/analysis/passes/internal/gofixdirective golang.org/x/tools/go/analysis/passes/loopclosure golang.org/x/tools/go/analysis/passes/lostcancel +golang.org/x/tools/go/analysis/passes/modernize golang.org/x/tools/go/analysis/passes/nilfunc golang.org/x/tools/go/analysis/passes/printf golang.org/x/tools/go/analysis/passes/shift @@ -118,6 +120,7 @@ golang.org/x/tools/go/analysis/passes/unsafeptr golang.org/x/tools/go/analysis/passes/unusedresult golang.org/x/tools/go/analysis/passes/waitgroup golang.org/x/tools/go/analysis/unitchecker +golang.org/x/tools/go/ast/astutil golang.org/x/tools/go/ast/edge golang.org/x/tools/go/ast/inspector golang.org/x/tools/go/cfg @@ -125,6 +128,7 @@ golang.org/x/tools/go/types/objectpath golang.org/x/tools/go/types/typeutil golang.org/x/tools/internal/aliases golang.org/x/tools/internal/analysisinternal +golang.org/x/tools/internal/analysisinternal/generated 
golang.org/x/tools/internal/analysisinternal/typeindex golang.org/x/tools/internal/astutil golang.org/x/tools/internal/bisect @@ -132,7 +136,11 @@ golang.org/x/tools/internal/diff golang.org/x/tools/internal/diff/lcs golang.org/x/tools/internal/facts golang.org/x/tools/internal/fmtstr +golang.org/x/tools/internal/goplsexport golang.org/x/tools/internal/moreiters +golang.org/x/tools/internal/packagepath +golang.org/x/tools/internal/refactor +golang.org/x/tools/internal/refactor/inline golang.org/x/tools/internal/stdlib golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal diff --git a/src/cmd/vet/doc.go b/src/cmd/vet/doc.go index 8e72c252ed9..ca208845615 100644 --- a/src/cmd/vet/doc.go +++ b/src/cmd/vet/doc.go @@ -40,6 +40,7 @@ To list the available checks, run "go tool vet help": directive check Go toolchain directives such as //go:debug errorsas report passing non-pointer or non-error values to errors.As framepointer report assembly that clobbers the frame pointer before saving it + hostport check format of addresses passed to net.Dial httpresponse check for mistakes using HTTP responses ifaceassert detect impossible interface-to-interface type assertions loopclosure check references to loop variables from within nested functions @@ -50,6 +51,7 @@ To list the available checks, run "go tool vet help": sigchanyzer check for unbuffered channel of os.Signal slog check for invalid structured logging calls stdmethods check signature of methods of well-known interfaces + stdversion report uses of too-new standard library symbols stringintconv check for string(int) conversions structtag check that struct field tags conform to reflect.StructTag.Get testinggoroutine report calls to (*testing.T).Fatal from goroutines started by a test diff --git a/src/cmd/vet/main.go b/src/cmd/vet/main.go index 49f4e2f3425..e7164a46b0a 100644 --- a/src/cmd/vet/main.go +++ b/src/cmd/vet/main.go @@ -7,10 +7,8 @@ package main import ( "cmd/internal/objabi" "cmd/internal/telemetry/counter" - "flag" - - "golang.org/x/tools/go/analysis/unitchecker" + "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/appends" "golang.org/x/tools/go/analysis/passes/asmdecl" "golang.org/x/tools/go/analysis/passes/assign" @@ -46,52 +44,57 @@ import ( "golang.org/x/tools/go/analysis/passes/unsafeptr" "golang.org/x/tools/go/analysis/passes/unusedresult" "golang.org/x/tools/go/analysis/passes/waitgroup" + "golang.org/x/tools/go/analysis/unitchecker" ) func main() { + // Keep consistent with cmd/fix/main.go! counter.Open() objabi.AddVersionFlag() - counter.Inc("vet/invocations") - unitchecker.Main( - appends.Analyzer, - asmdecl.Analyzer, - assign.Analyzer, - atomic.Analyzer, - bools.Analyzer, - buildtag.Analyzer, - cgocall.Analyzer, - composite.Analyzer, - copylock.Analyzer, - defers.Analyzer, - directive.Analyzer, - errorsas.Analyzer, - framepointer.Analyzer, - httpresponse.Analyzer, - hostport.Analyzer, - ifaceassert.Analyzer, - loopclosure.Analyzer, - lostcancel.Analyzer, - nilfunc.Analyzer, - printf.Analyzer, - shift.Analyzer, - sigchanyzer.Analyzer, - slog.Analyzer, - stdmethods.Analyzer, - stdversion.Analyzer, - stringintconv.Analyzer, - structtag.Analyzer, - tests.Analyzer, - testinggoroutine.Analyzer, - timeformat.Analyzer, - unmarshal.Analyzer, - unreachable.Analyzer, - unsafeptr.Analyzer, - unusedresult.Analyzer, - waitgroup.Analyzer, - ) - // It's possible that unitchecker will exit early. In - // those cases the flags won't be counted. 
- counter.CountFlags("vet/flag:", *flag.CommandLine) + unitchecker.Main(suite...) // (never returns) +} + +// The vet suite analyzers report diagnostics. +// (Diagnostics must describe real problems, but need not +// suggest fixes, and fixes are not necessarily safe to apply.) +var suite = []*analysis.Analyzer{ + appends.Analyzer, + asmdecl.Analyzer, + assign.Analyzer, + atomic.Analyzer, + bools.Analyzer, + buildtag.Analyzer, + cgocall.Analyzer, + composite.Analyzer, + copylock.Analyzer, + defers.Analyzer, + directive.Analyzer, + errorsas.Analyzer, + // fieldalignment.Analyzer omitted: too noisy + framepointer.Analyzer, + httpresponse.Analyzer, + hostport.Analyzer, + ifaceassert.Analyzer, + loopclosure.Analyzer, + lostcancel.Analyzer, + nilfunc.Analyzer, + printf.Analyzer, + // shadow.Analyzer omitted: too noisy + shift.Analyzer, + sigchanyzer.Analyzer, + slog.Analyzer, + stdmethods.Analyzer, + stdversion.Analyzer, + stringintconv.Analyzer, + structtag.Analyzer, + tests.Analyzer, + testinggoroutine.Analyzer, + timeformat.Analyzer, + unmarshal.Analyzer, + unreachable.Analyzer, + unsafeptr.Analyzer, + unusedresult.Analyzer, + waitgroup.Analyzer, } diff --git a/src/cmd/vet/testdata/print/print.go b/src/cmd/vet/testdata/print/print.go index e00222c42b5..3761da420be 100644 --- a/src/cmd/vet/testdata/print/print.go +++ b/src/cmd/vet/testdata/print/print.go @@ -162,7 +162,7 @@ func PrintfTests() { Printf("hi") // ok const format = "%s %s\n" Printf(format, "hi", "there") - Printf(format, "hi") // ERROR "Printf format %s reads arg #2, but call has 1 arg$" + Printf(format, "hi") // ERROR "Printf format %s reads arg #2, but call has 1 arg" Printf("%s %d %.3v %q", "str", 4) // ERROR "Printf format %.3v reads arg #3, but call has 2 args" f := new(ptrStringer) f.Warn(0, "%s", "hello", 3) // ERROR "Warn call has possible Printf formatting directive %s" diff --git a/src/cmd/vet/vet_test.go b/src/cmd/vet/vet_test.go index 54eabca938c..1f2d6925ad4 100644 --- a/src/cmd/vet/vet_test.go +++ b/src/cmd/vet/vet_test.go @@ -4,6 +4,10 @@ package main +// TODO(adonovan): replace this test by a script test +// in cmd/go/testdata/script/vet_suite.txt like we do +// for 'go fix'. + import ( "bytes" "errors" @@ -28,7 +32,8 @@ func TestMain(m *testing.M) { os.Exit(0) } - os.Setenv("GO_VETTEST_IS_VET", "1") // Set for subprocesses to inherit. + // Set for subprocesses to inherit. 
+ os.Setenv("GO_VETTEST_IS_VET", "1") // ignore error os.Exit(m.Run()) } @@ -115,7 +120,7 @@ func TestVet(t *testing.T) { cmd.Env = append(os.Environ(), "GOWORK=off") cmd.Dir = "testdata/rangeloop" cmd.Stderr = new(strings.Builder) // all vet output goes to stderr - cmd.Run() + cmd.Run() // ignore error stderr := cmd.Stderr.(fmt.Stringer).String() filename := filepath.FromSlash("testdata/rangeloop/rangeloop.go") @@ -134,7 +139,7 @@ func TestVet(t *testing.T) { if err := errorCheck(stderr, false, filename, filepath.Base(filename)); err != nil { t.Errorf("error check failed: %s", err) - t.Log("vet stderr:\n", cmd.Stderr) + t.Logf("vet stderr:\n<<%s>>", cmd.Stderr) } }) @@ -146,7 +151,7 @@ func TestVet(t *testing.T) { cmd.Env = append(os.Environ(), "GOWORK=off") cmd.Dir = "testdata/stdversion" cmd.Stderr = new(strings.Builder) // all vet output goes to stderr - cmd.Run() + cmd.Run() // ignore error stderr := cmd.Stderr.(fmt.Stringer).String() filename := filepath.FromSlash("testdata/stdversion/stdversion.go") @@ -165,7 +170,7 @@ func TestVet(t *testing.T) { if err := errorCheck(stderr, false, filename, filepath.Base(filename)); err != nil { t.Errorf("error check failed: %s", err) - t.Log("vet stderr:\n", cmd.Stderr) + t.Logf("vet stderr:\n<<%s>>", cmd.Stderr) } }) } @@ -184,7 +189,7 @@ func cgoEnabled(t *testing.T) bool { func errchk(c *exec.Cmd, files []string, t *testing.T) { output, err := c.CombinedOutput() if _, ok := err.(*exec.ExitError); !ok { - t.Logf("vet output:\n%s", output) + t.Logf("vet output:\n<<%s>>", output) t.Fatal(err) } fullshort := make([]string, 0, len(files)*2) @@ -205,7 +210,6 @@ func TestTags(t *testing.T) { "x testtag y": 1, "othertag": 2, } { - tag, wantFile := tag, wantFile t.Run(tag, func(t *testing.T) { t.Parallel() t.Logf("-tags=%s", tag) @@ -266,7 +270,7 @@ func errorCheck(outStr string, wantAuto bool, fullshort ...string) (err error) { errmsgs, out = partitionStrings(we.prefix, out) } if len(errmsgs) == 0 { - errs = append(errs, fmt.Errorf("%s:%d: missing error %q", we.file, we.lineNum, we.reStr)) + errs = append(errs, fmt.Errorf("%s:%d: missing error %q (prefix: %s)", we.file, we.lineNum, we.reStr, we.prefix)) continue } matched := false diff --git a/src/compress/flate/example_test.go b/src/compress/flate/example_test.go index 578009248f5..57e059f37a7 100644 --- a/src/compress/flate/example_test.go +++ b/src/compress/flate/example_test.go @@ -175,7 +175,7 @@ func Example_synchronization() { } b := make([]byte, 256) - for _, m := range strings.Fields("A long time ago in a galaxy far, far away...") { + for m := range strings.FieldsSeq("A long time ago in a galaxy far, far away...") { // We use a simple framing format where the first byte is the // message length, followed the message itself. 
b[0] = uint8(copy(b[1:], m)) diff --git a/src/context/x_test.go b/src/context/x_test.go index 0cf19688c3f..aeb5470399f 100644 --- a/src/context/x_test.go +++ b/src/context/x_test.go @@ -838,7 +838,6 @@ func TestCause(t *testing.T) { cause: parentCause, }, } { - test := test t.Run(test.name, func(t *testing.T) { t.Parallel() ctx := test.ctx() diff --git a/src/crypto/cipher/ctr_aes_test.go b/src/crypto/cipher/ctr_aes_test.go index 33942467784..9b7d30e2164 100644 --- a/src/crypto/cipher/ctr_aes_test.go +++ b/src/crypto/cipher/ctr_aes_test.go @@ -145,7 +145,6 @@ func TestCTR_AES_multiblock_random_IV(t *testing.T) { const Size = 100 for _, keySize := range []int{16, 24, 32} { - keySize := keySize t.Run(fmt.Sprintf("keySize=%d", keySize), func(t *testing.T) { key := randBytes(t, r, keySize) aesBlock, err := aes.NewCipher(key) @@ -164,10 +163,8 @@ func TestCTR_AES_multiblock_random_IV(t *testing.T) { // individually using multiblock implementation to catch edge cases. for part1 := 0; part1 <= Size; part1++ { - part1 := part1 t.Run(fmt.Sprintf("part1=%d", part1), func(t *testing.T) { for part2 := 0; part2 <= Size-part1; part2++ { - part2 := part2 t.Run(fmt.Sprintf("part2=%d", part2), func(t *testing.T) { _, multiblockCtr := makeTestingCiphers(aesBlock, iv) multiblockCiphertext := make([]byte, len(plaintext)) @@ -216,7 +213,6 @@ func TestCTR_AES_multiblock_overflow_IV(t *testing.T) { } for _, keySize := range []int{16, 24, 32} { - keySize := keySize t.Run(fmt.Sprintf("keySize=%d", keySize), func(t *testing.T) { for _, iv := range ivs { key := randBytes(t, r, keySize) @@ -227,7 +223,6 @@ func TestCTR_AES_multiblock_overflow_IV(t *testing.T) { t.Run(fmt.Sprintf("iv=%s", hex.EncodeToString(iv)), func(t *testing.T) { for _, offset := range []int{0, 1, 16, 1024} { - offset := offset t.Run(fmt.Sprintf("offset=%d", offset), func(t *testing.T) { genericCtr, multiblockCtr := makeTestingCiphers(aesBlock, iv) @@ -260,7 +255,6 @@ func TestCTR_AES_multiblock_XORKeyStreamAt(t *testing.T) { plaintext := randBytes(t, r, Size) for _, keySize := range []int{16, 24, 32} { - keySize := keySize t.Run(fmt.Sprintf("keySize=%d", keySize), func(t *testing.T) { key := randBytes(t, r, keySize) iv := randBytes(t, r, aesBlockSize) diff --git a/src/crypto/internal/constanttime/constant_time.go b/src/crypto/internal/constanttime/constant_time.go new file mode 100644 index 00000000000..55253071956 --- /dev/null +++ b/src/crypto/internal/constanttime/constant_time.go @@ -0,0 +1,42 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package constanttime + +// The functions in this package are compiler intrinsics for constant-time +// operations. They are exposed by crypto/subtle and used directly by the +// FIPS 140-3 module. + +// Select returns x if v == 1 and y if v == 0. +// Its behavior is undefined if v takes any other value. +func Select(v, x, y int) int { + // This is intrinsicified on arches with CMOV. + // It implements the following superset behavior: + // ConstantTimeSelect returns x if v != 0 and y if v == 0. + // Do the same here to avoid non portable UB. + v = int(boolToUint8(v != 0)) + return ^(v-1)&x | (v-1)&y +} + +// ByteEq returns 1 if x == y and 0 otherwise. +func ByteEq(x, y uint8) int { + return int(boolToUint8(x == y)) +} + +// Eq returns 1 if x == y and 0 otherwise. +func Eq(x, y int32) int { + return int(boolToUint8(x == y)) +} + +// LessOrEq returns 1 if x <= y and 0 otherwise. 
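+// For example (illustrative): LessOrEq(3, 7) == 1 and LessOrEq(7, 3) == 0.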
+// Its behavior is undefined if x or y are negative or > 2**31 - 1. +func LessOrEq(x, y int) int { + return int(boolToUint8(x <= y)) +} + +// boolToUint8 is a compiler intrinsic. +// It returns 1 for true and 0 for false. +func boolToUint8(b bool) uint8 { + panic("unreachable; must be intrinsicified") +} diff --git a/src/crypto/internal/cryptotest/methods.go b/src/crypto/internal/cryptotest/methods.go index 9105eb30aa0..f7d48a0fb80 100644 --- a/src/crypto/internal/cryptotest/methods.go +++ b/src/crypto/internal/cryptotest/methods.go @@ -19,7 +19,7 @@ import ( // of the API even if undocumented per Hyrum's Law. // // ms must be a pointer to a non-nil interface. -func NoExtraMethods(t *testing.T, ms interface{}, allowed ...string) { +func NoExtraMethods(t *testing.T, ms any, allowed ...string) { t.Helper() extraMethods, err := extraMethods(ms) if err != nil { @@ -33,7 +33,7 @@ func NoExtraMethods(t *testing.T, ms interface{}, allowed ...string) { } } -func extraMethods(ip interface{}) ([]string, error) { +func extraMethods(ip any) ([]string, error) { v := reflect.ValueOf(ip) if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Interface || v.Elem().IsNil() { return nil, fmt.Errorf("argument must be a pointer to a non-nil interface") diff --git a/src/crypto/internal/entropy/entropy.go b/src/crypto/internal/entropy/entropy.go index 73fd5298007..6e16f19de52 100644 --- a/src/crypto/internal/entropy/entropy.go +++ b/src/crypto/internal/entropy/entropy.go @@ -4,8 +4,10 @@ // Package entropy provides the passive entropy source for the FIPS 140-3 // module. It is only used in FIPS mode by [crypto/internal/fips140/drbg.Read] -// from the FIPS 140-3 Go Cryptographic Module v1.0.0. Later versions of the -// module have an internal CPU jitter-based entropy source. +// from the FIPS 140-3 Go Cryptographic Module v1.0.0. +// +// Later versions of the module use the CPU jitter-based entropy source in the +// crypto/internal/entropy/v1.0.0 sub-package. // // This complied with IG 9.3.A, Additional Comment 12, which until January 1, // 2026 allows new modules to meet an [earlier version] of Resolution 2(b): diff --git a/src/crypto/internal/fips140/entropy/entropy.go b/src/crypto/internal/entropy/v1.0.0/entropy.go similarity index 98% rename from src/crypto/internal/fips140/entropy/entropy.go rename to src/crypto/internal/entropy/v1.0.0/entropy.go index 273f05c817a..f5b2f53752a 100644 --- a/src/crypto/internal/fips140/entropy/entropy.go +++ b/src/crypto/internal/entropy/v1.0.0/entropy.go @@ -123,7 +123,9 @@ func (s *source) Sample() uint8 { // Perform a few memory accesses in an unpredictable pattern to expose the // next measurement to as much system noise as possible. memory, lcgState := s.memory, s.lcgState - _ = memory[0] // hoist the nil check out of touchMemory + if memory == nil { // remove the nil check from the inlined touchMemory calls + panic("entropy: nil memory buffer") + } for range 64 { lcgState = 1664525*lcgState + 1013904223 // Discard the lower bits, which tend to fall into short cycles. 
diff --git a/src/crypto/internal/fips140/entropy/sha384.go b/src/crypto/internal/entropy/v1.0.0/sha384.go similarity index 81% rename from src/crypto/internal/fips140/entropy/sha384.go rename to src/crypto/internal/entropy/v1.0.0/sha384.go index ec23cfc9ad3..c20f76b5797 100644 --- a/src/crypto/internal/fips140/entropy/sha384.go +++ b/src/crypto/internal/entropy/v1.0.0/sha384.go @@ -8,19 +8,22 @@ import "math/bits" // This file includes a SHA-384 implementation to insulate the entropy source // from any changes in the FIPS 140-3 module's crypto/internal/fips140/sha512 -// package. We only support 1024-byte inputs. +// package. We support 1024-byte inputs for the entropy source, and arbitrary +// length inputs for ACVP testing. + +var initState = [8]uint64{ + 0xcbbb9d5dc1059ed8, + 0x629a292a367cd507, + 0x9159015a3070dd17, + 0x152fecd8f70e5939, + 0x67332667ffc00b31, + 0x8eb44a8768581511, + 0xdb0c2e0d64f98fa7, + 0x47b5481dbefa4fa4, +} func SHA384(p *[1024]byte) [48]byte { - h := [8]uint64{ - 0xcbbb9d5dc1059ed8, - 0x629a292a367cd507, - 0x9159015a3070dd17, - 0x152fecd8f70e5939, - 0x67332667ffc00b31, - 0x8eb44a8768581511, - 0xdb0c2e0d64f98fa7, - 0x47b5481dbefa4fa4, - } + h := initState sha384Block(&h, (*[128]byte)(p[0:128])) sha384Block(&h, (*[128]byte)(p[128:256])) @@ -36,6 +39,38 @@ func SHA384(p *[1024]byte) [48]byte { bePutUint64(padlen[112+8:], 1024*8) sha384Block(&h, &padlen) + return digestBytes(&h) +} + +func TestingOnlySHA384(p []byte) [48]byte { + if len(p) == 1024 { + return SHA384((*[1024]byte)(p)) + } + + h := initState + bitLen := uint64(len(p)) * 8 + + // Process full 128-byte blocks. + for len(p) >= 128 { + sha384Block(&h, (*[128]byte)(p[:128])) + p = p[128:] + } + + // Process final block and padding. + var finalBlock [128]byte + copy(finalBlock[:], p) + finalBlock[len(p)] = 0x80 + if len(p) >= 112 { + sha384Block(&h, &finalBlock) + finalBlock = [128]byte{} + } + bePutUint64(finalBlock[112+8:], bitLen) + sha384Block(&h, &finalBlock) + + return digestBytes(&h) +} + +func digestBytes(h *[8]uint64) [48]byte { var digest [48]byte bePutUint64(digest[0:], h[0]) bePutUint64(digest[8:], h[1]) diff --git a/src/crypto/internal/fips140/aes/_asm/ctr/ctr_amd64_asm.go b/src/crypto/internal/fips140/aes/_asm/ctr/ctr_amd64_asm.go index 35e1d8aeb62..775d4a8acc5 100644 --- a/src/crypto/internal/fips140/aes/_asm/ctr/ctr_amd64_asm.go +++ b/src/crypto/internal/fips140/aes/_asm/ctr/ctr_amd64_asm.go @@ -16,7 +16,7 @@ import ( //go:generate go run . 
-out ../../ctr_amd64.s func main() { - Package("crypto/aes") + Package("crypto/internal/fips140/aes") ConstraintExpr("!purego") ctrBlocks(1) diff --git a/src/crypto/internal/fips140/aes/_asm/ctr/go.mod b/src/crypto/internal/fips140/aes/_asm/ctr/go.mod index 5d97cd7f4e6..80aac1559c1 100644 --- a/src/crypto/internal/fips140/aes/_asm/ctr/go.mod +++ b/src/crypto/internal/fips140/aes/_asm/ctr/go.mod @@ -1,11 +1,11 @@ module crypto/aes/_asm/ctr -go 1.24 +go 1.25 require github.com/mmcloughlin/avo v0.6.0 require ( - golang.org/x/mod v0.20.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/mod v0.29.0 // indirect + golang.org/x/sync v0.17.0 // indirect + golang.org/x/tools v0.38.0 // indirect ) diff --git a/src/crypto/internal/fips140/aes/_asm/ctr/go.sum b/src/crypto/internal/fips140/aes/_asm/ctr/go.sum index 76af484b2eb..7186595f98f 100644 --- a/src/crypto/internal/fips140/aes/_asm/ctr/go.sum +++ b/src/crypto/internal/fips140/aes/_asm/ctr/go.sum @@ -1,8 +1,10 @@ +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/mmcloughlin/avo v0.6.0 h1:QH6FU8SKoTLaVs80GA8TJuLNkUYl4VokHKlPhVDg4YY= github.com/mmcloughlin/avo v0.6.0/go.mod h1:8CoAGaCSYXtCPR+8y18Y9aB/kxb8JSS6FRI7mSkvD+8= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= diff --git a/src/crypto/internal/fips140/bigmod/nat.go b/src/crypto/internal/fips140/bigmod/nat.go index 7b690178b9e..2d5921dbbc5 100644 --- a/src/crypto/internal/fips140/bigmod/nat.go +++ b/src/crypto/internal/fips140/bigmod/nat.go @@ -1088,7 +1088,7 @@ func (x *Nat) GCDVarTime(a, b *Nat) (*Nat, error) { return x.set(u), nil } -// extendedGCD computes u and A such that a = GCD(a, m) and u = A*a - B*m. +// extendedGCD computes u and A such that u = GCD(a, m) = A*a - B*m. // // u will have the size of the larger of a and m, and A will have the size of m. 
// diff --git a/src/crypto/internal/fips140/drbg/rand.go b/src/crypto/internal/fips140/drbg/rand.go index 3ccb018e326..cec697c7ab8 100644 --- a/src/crypto/internal/fips140/drbg/rand.go +++ b/src/crypto/internal/fips140/drbg/rand.go @@ -9,8 +9,8 @@ package drbg import ( + entropy "crypto/internal/entropy/v1.0.0" "crypto/internal/fips140" - "crypto/internal/fips140/entropy" "crypto/internal/randutil" "crypto/internal/sysrand" "io" diff --git a/src/crypto/internal/fips140/edwards25519/scalar_alias_test.go b/src/crypto/internal/fips140/edwards25519/scalar_alias_test.go index 1893a7fc0c4..47831dbfb24 100644 --- a/src/crypto/internal/fips140/edwards25519/scalar_alias_test.go +++ b/src/crypto/internal/fips140/edwards25519/scalar_alias_test.go @@ -71,7 +71,7 @@ func TestScalarAliasing(t *testing.T) { return x == x1 && y == y1 } - for name, f := range map[string]interface{}{ + for name, f := range map[string]any{ "Negate": func(v, x Scalar) bool { return checkAliasingOneArg((*Scalar).Negate, v, x) }, diff --git a/src/crypto/internal/fips140/edwards25519/tables.go b/src/crypto/internal/fips140/edwards25519/tables.go index 801b76771d1..7da3f7b15bc 100644 --- a/src/crypto/internal/fips140/edwards25519/tables.go +++ b/src/crypto/internal/fips140/edwards25519/tables.go @@ -4,9 +4,7 @@ package edwards25519 -import ( - "crypto/internal/fips140/subtle" -) +import "crypto/internal/constanttime" // A dynamic lookup table for variable-base, constant-time scalar muls. type projLookupTable struct { @@ -95,7 +93,7 @@ func (v *projLookupTable) SelectInto(dest *projCached, x int8) { dest.Zero() for j := 1; j <= 8; j++ { // Set dest = j*Q if |x| = j - cond := subtle.ConstantTimeByteEq(xabs, uint8(j)) + cond := constanttime.ByteEq(xabs, uint8(j)) dest.Select(&v.points[j-1], dest, cond) } // Now dest = |x|*Q, conditionally negate to get x*Q @@ -111,7 +109,7 @@ func (v *affineLookupTable) SelectInto(dest *affineCached, x int8) { dest.Zero() for j := 1; j <= 8; j++ { // Set dest = j*Q if |x| = j - cond := subtle.ConstantTimeByteEq(xabs, uint8(j)) + cond := constanttime.ByteEq(xabs, uint8(j)) dest.Select(&v.points[j-1], dest, cond) } // Now dest = |x|*Q, conditionally negate to get x*Q diff --git a/src/crypto/internal/fips140/nistec/generate.go b/src/crypto/internal/fips140/nistec/generate.go index 7786dc556f5..75b1ac60f0b 100644 --- a/src/crypto/internal/fips140/nistec/generate.go +++ b/src/crypto/internal/fips140/nistec/generate.go @@ -140,8 +140,8 @@ const tmplNISTEC = `// Copyright 2022 The Go Authors. All rights reserved. 
package nistec import ( + "crypto/internal/constanttime" "crypto/internal/fips140/nistec/fiat" - "crypto/internal/fips140/subtle" "errors" "sync" ) @@ -467,7 +467,7 @@ func (table *{{.p}}Table) Select(p *{{.P}}Point, n uint8) { } p.Set(New{{.P}}Point()) for i := uint8(1); i < 16; i++ { - cond := subtle.ConstantTimeByteEq(i, n) + cond := constanttime.ByteEq(i, n) p.Select(table[i-1], p, cond) } } diff --git a/src/crypto/internal/fips140/nistec/p224.go b/src/crypto/internal/fips140/nistec/p224.go index 82bced251fe..7965b186891 100644 --- a/src/crypto/internal/fips140/nistec/p224.go +++ b/src/crypto/internal/fips140/nistec/p224.go @@ -7,8 +7,8 @@ package nistec import ( + "crypto/internal/constanttime" "crypto/internal/fips140/nistec/fiat" - "crypto/internal/fips140/subtle" "errors" "sync" ) @@ -333,7 +333,7 @@ func (table *p224Table) Select(p *P224Point, n uint8) { } p.Set(NewP224Point()) for i := uint8(1); i < 16; i++ { - cond := subtle.ConstantTimeByteEq(i, n) + cond := constanttime.ByteEq(i, n) p.Select(table[i-1], p, cond) } } diff --git a/src/crypto/internal/fips140/nistec/p256.go b/src/crypto/internal/fips140/nistec/p256.go index c957c542473..650bde4e73e 100644 --- a/src/crypto/internal/fips140/nistec/p256.go +++ b/src/crypto/internal/fips140/nistec/p256.go @@ -7,8 +7,8 @@ package nistec import ( + "crypto/internal/constanttime" "crypto/internal/fips140/nistec/fiat" - "crypto/internal/fips140/subtle" "crypto/internal/fips140deps/byteorder" "crypto/internal/fips140deps/cpu" "errors" @@ -458,7 +458,7 @@ func (table *p256Table) Select(p *P256Point, n uint8) { } p.Set(NewP256Point()) for i := uint8(1); i <= 16; i++ { - cond := subtle.ConstantTimeByteEq(i, n) + cond := constanttime.ByteEq(i, n) p.Select(&table[i-1], p, cond) } } @@ -553,7 +553,7 @@ func (table *p256AffineTable) Select(p *p256AffinePoint, n uint8) { panic("nistec: internal error: p256AffineTable.Select called with out-of-bounds value") } for i := uint8(1); i <= 32; i++ { - cond := subtle.ConstantTimeByteEq(i, n) + cond := constanttime.ByteEq(i, n) p.x.Select(&table[i-1].x, &p.x, cond) p.y.Select(&table[i-1].y, &p.y, cond) } @@ -618,7 +618,7 @@ func (p *P256Point) ScalarBaseMult(scalar []byte) (*P256Point, error) { // the point at infinity (because infinity can't be represented in affine // coordinates). Here we conditionally set p to the infinity if sel is zero. // In the loop, that's handled by AddAffine. 
- selIsZero := subtle.ConstantTimeByteEq(sel, 0) + selIsZero := constanttime.ByteEq(sel, 0) p.Select(NewP256Point(), t.Projective(), selIsZero) for index >= 5 { @@ -636,7 +636,7 @@ func (p *P256Point) ScalarBaseMult(scalar []byte) (*P256Point, error) { table := &p256GeneratorTables[(index+1)/6] table.Select(t, sel) t.Negate(sign) - selIsZero := subtle.ConstantTimeByteEq(sel, 0) + selIsZero := constanttime.ByteEq(sel, 0) p.AddAffine(p, t, selIsZero) } diff --git a/src/crypto/internal/fips140/nistec/p384.go b/src/crypto/internal/fips140/nistec/p384.go index 318c08a9797..352f1a806e8 100644 --- a/src/crypto/internal/fips140/nistec/p384.go +++ b/src/crypto/internal/fips140/nistec/p384.go @@ -7,8 +7,8 @@ package nistec import ( + "crypto/internal/constanttime" "crypto/internal/fips140/nistec/fiat" - "crypto/internal/fips140/subtle" "errors" "sync" ) @@ -333,7 +333,7 @@ func (table *p384Table) Select(p *P384Point, n uint8) { } p.Set(NewP384Point()) for i := uint8(1); i < 16; i++ { - cond := subtle.ConstantTimeByteEq(i, n) + cond := constanttime.ByteEq(i, n) p.Select(table[i-1], p, cond) } } diff --git a/src/crypto/internal/fips140/nistec/p521.go b/src/crypto/internal/fips140/nistec/p521.go index 8ade8a33040..429f6379934 100644 --- a/src/crypto/internal/fips140/nistec/p521.go +++ b/src/crypto/internal/fips140/nistec/p521.go @@ -7,8 +7,8 @@ package nistec import ( + "crypto/internal/constanttime" "crypto/internal/fips140/nistec/fiat" - "crypto/internal/fips140/subtle" "errors" "sync" ) @@ -333,7 +333,7 @@ func (table *p521Table) Select(p *P521Point, n uint8) { } p.Set(NewP521Point()) for i := uint8(1); i < 16; i++ { - cond := subtle.ConstantTimeByteEq(i, n) + cond := constanttime.ByteEq(i, n) p.Select(table[i-1], p, cond) } } diff --git a/src/crypto/internal/fips140/rsa/pkcs1v22.go b/src/crypto/internal/fips140/rsa/pkcs1v22.go index de7943773e6..29c47069a3e 100644 --- a/src/crypto/internal/fips140/rsa/pkcs1v22.go +++ b/src/crypto/internal/fips140/rsa/pkcs1v22.go @@ -9,6 +9,7 @@ package rsa import ( "bytes" + "crypto/internal/constanttime" "crypto/internal/fips140" "crypto/internal/fips140/drbg" "crypto/internal/fips140/sha256" @@ -316,7 +317,7 @@ func VerifyPSS(pub *PublicKey, hash hash.Hash, digest []byte, sig []byte) error return verifyPSS(pub, hash, digest, sig, pssSaltLengthAutodetect) } -// VerifyPSS verifies sig with RSASSA-PSS and an expected salt length. +// VerifyPSSWithSaltLength verifies sig with RSASSA-PSS and an expected salt length. 
func VerifyPSSWithSaltLength(pub *PublicKey, hash hash.Hash, digest []byte, sig []byte, saltLength int) error { if saltLength < 0 { return errors.New("crypto/rsa: salt length cannot be negative") @@ -432,7 +433,7 @@ func DecryptOAEP(hash, mgfHash hash.Hash, priv *PrivateKey, ciphertext []byte, l hash.Write(label) lHash := hash.Sum(nil) - firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0) + firstByteIsZero := constanttime.ByteEq(em[0], 0) seed := em[1 : hash.Size()+1] db := em[hash.Size()+1:] @@ -458,11 +459,11 @@ func DecryptOAEP(hash, mgfHash hash.Hash, priv *PrivateKey, ciphertext []byte, l rest := db[hash.Size():] for i := 0; i < len(rest); i++ { - equals0 := subtle.ConstantTimeByteEq(rest[i], 0) - equals1 := subtle.ConstantTimeByteEq(rest[i], 1) - index = subtle.ConstantTimeSelect(lookingForIndex&equals1, i, index) - lookingForIndex = subtle.ConstantTimeSelect(equals1, 0, lookingForIndex) - invalid = subtle.ConstantTimeSelect(lookingForIndex&^equals0, 1, invalid) + equals0 := constanttime.ByteEq(rest[i], 0) + equals1 := constanttime.ByteEq(rest[i], 1) + index = constanttime.Select(lookingForIndex&equals1, i, index) + lookingForIndex = constanttime.Select(equals1, 0, lookingForIndex) + invalid = constanttime.Select(lookingForIndex&^equals0, 1, invalid) } if firstByteIsZero&lHash2Good&^invalid&^lookingForIndex != 1 { diff --git a/src/crypto/internal/fips140/sha3/hashes.go b/src/crypto/internal/fips140/sha3/hashes.go index da1b9bcf5f8..cafa169bb9f 100644 --- a/src/crypto/internal/fips140/sha3/hashes.go +++ b/src/crypto/internal/fips140/sha3/hashes.go @@ -24,13 +24,6 @@ func New512() *Digest { return &Digest{rate: rateK1024, outputLen: 64, dsbyte: dsbyteSHA3} } -// TODO(fips): do this in the stdlib crypto/sha3 package. -// -// crypto.RegisterHash(crypto.SHA3_224, New224) -// crypto.RegisterHash(crypto.SHA3_256, New256) -// crypto.RegisterHash(crypto.SHA3_384, New384) -// crypto.RegisterHash(crypto.SHA3_512, New512) - const ( dsbyteSHA3 = 0b00000110 dsbyteKeccak = 0b00000001 diff --git a/src/crypto/internal/fips140/subtle/constant_time.go b/src/crypto/internal/fips140/subtle/constant_time.go index fa7a002d3fa..fc1e3079855 100644 --- a/src/crypto/internal/fips140/subtle/constant_time.go +++ b/src/crypto/internal/fips140/subtle/constant_time.go @@ -5,6 +5,7 @@ package subtle import ( + "crypto/internal/constanttime" "crypto/internal/fips140deps/byteorder" "math/bits" ) @@ -24,7 +25,7 @@ func ConstantTimeCompare(x, y []byte) int { v |= x[i] ^ y[i] } - return ConstantTimeByteEq(v, 0) + return constanttime.ByteEq(v, 0) } // ConstantTimeLessOrEqBytes returns 1 if x <= y and 0 otherwise. The comparison @@ -58,20 +59,6 @@ func ConstantTimeLessOrEqBytes(x, y []byte) int { return int(b ^ 1) } -// ConstantTimeSelect returns x if v == 1 and y if v == 0. -// Its behavior is undefined if v takes any other value. -func ConstantTimeSelect(v, x, y int) int { return ^(v-1)&x | (v-1)&y } - -// ConstantTimeByteEq returns 1 if x == y and 0 otherwise. -func ConstantTimeByteEq(x, y uint8) int { - return int((uint32(x^y) - 1) >> 31) -} - -// ConstantTimeEq returns 1 if x == y and 0 otherwise. -func ConstantTimeEq(x, y int32) int { - return int((uint64(uint32(x^y)) - 1) >> 63) -} - // ConstantTimeCopy copies the contents of y into x (a slice of equal length) // if v == 1. If v == 0, x is left unchanged. Its behavior is undefined if v // takes any other value. 
@@ -86,11 +73,3 @@ func ConstantTimeCopy(v int, x, y []byte) { x[i] = x[i]&xmask | y[i]&ymask } } - -// ConstantTimeLessOrEq returns 1 if x <= y and 0 otherwise. -// Its behavior is undefined if x or y are negative or > 2**31 - 1. -func ConstantTimeLessOrEq(x, y int) int { - x32 := int32(x) - y32 := int32(y) - return int(((x32 - y32 - 1) >> 31) & 1) -} diff --git a/src/crypto/internal/fips140/subtle/xor_asm.go b/src/crypto/internal/fips140/subtle/xor_asm.go index b07239da3e3..bb85aefef40 100644 --- a/src/crypto/internal/fips140/subtle/xor_asm.go +++ b/src/crypto/internal/fips140/subtle/xor_asm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (amd64 || arm64 || mips || mipsle || mips64 || mips64le || ppc64 || ppc64le || riscv64) && !purego +//go:build (amd64 || arm64 || ppc64 || ppc64le || riscv64) && !purego package subtle diff --git a/src/crypto/internal/fips140/subtle/xor_generic.go b/src/crypto/internal/fips140/subtle/xor_generic.go index ed484bc630e..0b31eec6019 100644 --- a/src/crypto/internal/fips140/subtle/xor_generic.go +++ b/src/crypto/internal/fips140/subtle/xor_generic.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (!amd64 && !arm64 && !loong64 && !mips && !mipsle && !mips64 && !mips64le && !ppc64 && !ppc64le && !riscv64) || purego +//go:build (!amd64 && !arm64 && !loong64 && !ppc64 && !ppc64le && !riscv64) || purego package subtle diff --git a/src/crypto/internal/fips140/subtle/xor_mips64x.s b/src/crypto/internal/fips140/subtle/xor_mips64x.s deleted file mode 100644 index e580235914a..00000000000 --- a/src/crypto/internal/fips140/subtle/xor_mips64x.s +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build (mips64 || mips64le) && !purego - -#include "textflag.h" - -// func xorBytes(dst, a, b *byte, n int) -TEXT ·xorBytes(SB), NOSPLIT|NOFRAME, $0 - MOVV dst+0(FP), R1 - MOVV a+8(FP), R2 - MOVV b+16(FP), R3 - MOVV n+24(FP), R4 - -xor_64_check: - SGTU $64, R4, R5 // R5 = 1 if (64 > R4) - BNE R5, xor_32_check -xor_64: - MOVV (R2), R6 - MOVV 8(R2), R7 - MOVV 16(R2), R8 - MOVV 24(R2), R9 - MOVV (R3), R10 - MOVV 8(R3), R11 - MOVV 16(R3), R12 - MOVV 24(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVV R10, (R1) - MOVV R11, 8(R1) - MOVV R12, 16(R1) - MOVV R13, 24(R1) - MOVV 32(R2), R6 - MOVV 40(R2), R7 - MOVV 48(R2), R8 - MOVV 56(R2), R9 - MOVV 32(R3), R10 - MOVV 40(R3), R11 - MOVV 48(R3), R12 - MOVV 56(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVV R10, 32(R1) - MOVV R11, 40(R1) - MOVV R12, 48(R1) - MOVV R13, 56(R1) - ADDV $64, R2 - ADDV $64, R3 - ADDV $64, R1 - SUBV $64, R4 - SGTU $64, R4, R5 - BEQ R0, R5, xor_64 - BEQ R0, R4, end - -xor_32_check: - SGTU $32, R4, R5 - BNE R5, xor_16_check -xor_32: - MOVV (R2), R6 - MOVV 8(R2), R7 - MOVV 16(R2), R8 - MOVV 24(R2), R9 - MOVV (R3), R10 - MOVV 8(R3), R11 - MOVV 16(R3), R12 - MOVV 24(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVV R10, (R1) - MOVV R11, 8(R1) - MOVV R12, 16(R1) - MOVV R13, 24(R1) - ADDV $32, R2 - ADDV $32, R3 - ADDV $32, R1 - SUBV $32, R4 - BEQ R0, R4, end - -xor_16_check: - SGTU $16, R4, R5 - BNE R5, xor_8_check -xor_16: - MOVV (R2), R6 - MOVV 8(R2), R7 - MOVV (R3), R8 - MOVV 8(R3), R9 - XOR R6, R8 - XOR R7, R9 - MOVV R8, (R1) - MOVV R9, 8(R1) - ADDV $16, R2 - ADDV $16, R3 - ADDV $16, R1 - SUBV $16, R4 - BEQ R0, R4, end - -xor_8_check: - SGTU $8, R4, R5 - BNE R5, xor_4_check -xor_8: - MOVV (R2), R6 - MOVV (R3), R7 - XOR R6, R7 - MOVV R7, (R1) - ADDV $8, R1 - ADDV $8, R2 - ADDV $8, R3 - SUBV $8, R4 - BEQ R0, R4, end - -xor_4_check: - SGTU $4, R4, R5 - BNE R5, xor_2_check -xor_4: - MOVW (R2), R6 - MOVW (R3), R7 - XOR R6, R7 - MOVW R7, (R1) - ADDV $4, R2 - ADDV $4, R3 - ADDV $4, R1 - SUBV $4, R4 - BEQ R0, R4, end - -xor_2_check: - SGTU $2, R4, R5 - BNE R5, xor_1 -xor_2: - MOVH (R2), R6 - MOVH (R3), R7 - XOR R6, R7 - MOVH R7, (R1) - ADDV $2, R2 - ADDV $2, R3 - ADDV $2, R1 - SUBV $2, R4 - BEQ R0, R4, end - -xor_1: - MOVB (R2), R6 - MOVB (R3), R7 - XOR R6, R7 - MOVB R7, (R1) - -end: - RET diff --git a/src/crypto/internal/fips140/subtle/xor_mipsx.s b/src/crypto/internal/fips140/subtle/xor_mipsx.s deleted file mode 100644 index 1a6b3f409dd..00000000000 --- a/src/crypto/internal/fips140/subtle/xor_mipsx.s +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build (mips || mipsle) && !purego - -#include "textflag.h" - -// func xorBytes(dst, a, b *byte, n int) -TEXT ·xorBytes(SB), NOSPLIT|NOFRAME, $0 - MOVW dst+0(FP), R1 - MOVW a+4(FP), R2 - MOVW b+8(FP), R3 - MOVW n+12(FP), R4 - - SGTU $64, R4, R5 // R5 = 1 if (64 > R4) - BNE R5, xor_32_check -xor_64: - MOVW (R2), R6 - MOVW 4(R2), R7 - MOVW 8(R2), R8 - MOVW 12(R2), R9 - MOVW (R3), R10 - MOVW 4(R3), R11 - MOVW 8(R3), R12 - MOVW 12(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, (R1) - MOVW R11, 4(R1) - MOVW R12, 8(R1) - MOVW R13, 12(R1) - MOVW 16(R2), R6 - MOVW 20(R2), R7 - MOVW 24(R2), R8 - MOVW 28(R2), R9 - MOVW 16(R3), R10 - MOVW 20(R3), R11 - MOVW 24(R3), R12 - MOVW 28(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, 16(R1) - MOVW R11, 20(R1) - MOVW R12, 24(R1) - MOVW R13, 28(R1) - MOVW 32(R2), R6 - MOVW 36(R2), R7 - MOVW 40(R2), R8 - MOVW 44(R2), R9 - MOVW 32(R3), R10 - MOVW 36(R3), R11 - MOVW 40(R3), R12 - MOVW 44(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, 32(R1) - MOVW R11, 36(R1) - MOVW R12, 40(R1) - MOVW R13, 44(R1) - MOVW 48(R2), R6 - MOVW 52(R2), R7 - MOVW 56(R2), R8 - MOVW 60(R2), R9 - MOVW 48(R3), R10 - MOVW 52(R3), R11 - MOVW 56(R3), R12 - MOVW 60(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, 48(R1) - MOVW R11, 52(R1) - MOVW R12, 56(R1) - MOVW R13, 60(R1) - ADD $64, R2 - ADD $64, R3 - ADD $64, R1 - SUB $64, R4 - SGTU $64, R4, R5 - BEQ R0, R5, xor_64 - BEQ R0, R4, end - -xor_32_check: - SGTU $32, R4, R5 - BNE R5, xor_16_check -xor_32: - MOVW (R2), R6 - MOVW 4(R2), R7 - MOVW 8(R2), R8 - MOVW 12(R2), R9 - MOVW (R3), R10 - MOVW 4(R3), R11 - MOVW 8(R3), R12 - MOVW 12(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, (R1) - MOVW R11, 4(R1) - MOVW R12, 8(R1) - MOVW R13, 12(R1) - MOVW 16(R2), R6 - MOVW 20(R2), R7 - MOVW 24(R2), R8 - MOVW 28(R2), R9 - MOVW 16(R3), R10 - MOVW 20(R3), R11 - MOVW 24(R3), R12 - MOVW 28(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, 16(R1) - MOVW R11, 20(R1) - MOVW R12, 24(R1) - MOVW R13, 28(R1) - ADD $32, R2 - ADD $32, R3 - ADD $32, R1 - SUB $32, R4 - BEQ R0, R4, end - -xor_16_check: - SGTU $16, R4, R5 - BNE R5, xor_8_check -xor_16: - MOVW (R2), R6 - MOVW 4(R2), R7 - MOVW 8(R2), R8 - MOVW 12(R2), R9 - MOVW (R3), R10 - MOVW 4(R3), R11 - MOVW 8(R3), R12 - MOVW 12(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, (R1) - MOVW R11, 4(R1) - MOVW R12, 8(R1) - MOVW R13, 12(R1) - ADD $16, R2 - ADD $16, R3 - ADD $16, R1 - SUB $16, R4 - BEQ R0, R4, end - -xor_8_check: - SGTU $8, R4, R5 - BNE R5, xor_4_check -xor_8: - MOVW (R2), R6 - MOVW 4(R2), R7 - MOVW (R3), R8 - MOVW 4(R3), R9 - XOR R6, R8 - XOR R7, R9 - MOVW R8, (R1) - MOVW R9, 4(R1) - ADD $8, R1 - ADD $8, R2 - ADD $8, R3 - SUB $8, R4 - BEQ R0, R4, end - -xor_4_check: - SGTU $4, R4, R5 - BNE R5, xor_2_check -xor_4: - MOVW (R2), R6 - MOVW (R3), R7 - XOR R6, R7 - MOVW R7, (R1) - ADD $4, R2 - ADD $4, R3 - ADD $4, R1 - SUB $4, R4 - BEQ R0, R4, end - -xor_2_check: - SGTU $2, R4, R5 - BNE R5, xor_1 -xor_2: - MOVH (R2), R6 - MOVH (R3), R7 - XOR R6, R7 - MOVH R7, (R1) - ADD $2, R2 - ADD $2, R3 - ADD $2, R1 - SUB $2, R4 - BEQ R0, R4, end - -xor_1: - MOVB (R2), R6 - MOVB (R3), R7 - XOR R6, R7 - MOVB R7, (R1) - -end: - RET diff --git a/src/crypto/internal/fips140deps/fipsdeps_test.go b/src/crypto/internal/fips140deps/fipsdeps_test.go index 97552dc1ce1..29a56047c3c 100644 --- 
a/src/crypto/internal/fips140deps/fipsdeps_test.go +++ b/src/crypto/internal/fips140deps/fipsdeps_test.go @@ -16,10 +16,11 @@ import ( // // DO NOT add new packages here just to make the tests pass. var AllowedInternalPackages = map[string]bool{ - // entropy.Depleted is the external passive entropy source, and sysrand.Read + // entropy.Depleted/Seed is the entropy source, and sysrand.Read // is the actual (but uncredited!) random bytes source. - "crypto/internal/entropy": true, - "crypto/internal/sysrand": true, + "crypto/internal/entropy": true, + "crypto/internal/entropy/v1.0.0": true, + "crypto/internal/sysrand": true, // impl.Register is how the packages expose their alternative // implementations to tests outside the module. @@ -27,6 +28,9 @@ var AllowedInternalPackages = map[string]bool{ // randutil.MaybeReadByte is used in non-FIPS mode by GenerateKey functions. "crypto/internal/randutil": true, + + // constanttime are the constant-time intrinsics. + "crypto/internal/constanttime": true, } func TestImports(t *testing.T) { @@ -88,8 +92,7 @@ func TestImports(t *testing.T) { } } - // Ensure that all packages except check, check's dependencies, and the - // entropy source (which is used only from .../fips140/drbg) import check. + // Ensure that all packages except check and check's dependencies import check. for pkg := range allPackages { switch pkg { case "crypto/internal/fips140/check": @@ -100,7 +103,6 @@ func TestImports(t *testing.T) { case "crypto/internal/fips140/sha3": case "crypto/internal/fips140/sha256": case "crypto/internal/fips140/sha512": - case "crypto/internal/fips140/entropy": default: if !importCheck[pkg] { t.Errorf("package %s does not import crypto/internal/fips140/check", pkg) diff --git a/src/crypto/internal/fips140test/acvp_capabilities.entropy.json b/src/crypto/internal/fips140test/acvp_capabilities.entropy.json new file mode 100644 index 00000000000..0782d69bfc2 --- /dev/null +++ b/src/crypto/internal/fips140test/acvp_capabilities.entropy.json @@ -0,0 +1,3 @@ +[ + {"algorithm":"SHA2-384","messageLength":[{"increment":7040,"max":8192,"min":1152}],"revision":"1.0"} +] diff --git a/src/crypto/internal/fips140test/acvp_test.go b/src/crypto/internal/fips140test/acvp_test.go index 47a42cce1bc..3e1c0cb06a1 100644 --- a/src/crypto/internal/fips140test/acvp_test.go +++ b/src/crypto/internal/fips140test/acvp_test.go @@ -23,6 +23,7 @@ import ( "bytes" "crypto/elliptic" "crypto/internal/cryptotest" + "crypto/internal/entropy/v1.0.0" "crypto/internal/fips140" "crypto/internal/fips140/aes" "crypto/internal/fips140/aes/gcm" @@ -62,6 +63,11 @@ import ( var noPAAPAI = os.Getenv("GONOPAAPAI") == "1" +// Use the capabilities, configuration and commands for the entropy source. +// This is used to test the separate entropy source in crypto/internal/entropy +// since the algorithm name alone can't indicate which to test. +var entropyTesting = os.Getenv("GOENTROPYSOURCEACVP") == "1" + func TestMain(m *testing.M) { if noPAAPAI { for _, p := range impl.Packages() { @@ -155,6 +161,13 @@ var ( //go:embed acvp_capabilities.json capabilitiesJson []byte + // Separate capabilities specific to testing the entropy source's SHA2-384 implementation. + // This implementation differs from the FIPS module's SHA2-384 in its supported input sizes. + // Set the GOENTROPYSOURCEACVP environment variable to use these capabilities in place of + // capabilitiesJson + //go:embed acvp_capabilities.entropy.json + entropyCapabilitiesJson []byte + // commands should reflect what config says we support. E.g. 
adding a command here will be a NOP // unless the configuration/acvp_capabilities.json indicates the command's associated algorithm // is supported. @@ -183,6 +196,14 @@ var ( "SHA3-512": cmdHashAft(sha3.New512()), "SHA3-512/MCT": cmdSha3Mct(sha3.New512()), + // Note: the "/ENTROPY" suffix is our own creation, and applied conditionally + // based on the environment variable that indicates our acvp_test module wrapper + // is being used for evaluating the separate SHA-384 implementation for the + // CPU jitter entropy conditioning. Set GOENTROPYSOURCEACVP=1 to use these commands + // in place of SHA2-384. + "SHA2-384/ENTROPY": cmdEntropyHashEntropySha384Aft(), + "SHA2-384/MCT/ENTROPY": cmdEntropyHashEntropySha384Mct(), + // Note: SHAKE AFT and VOT test types can be handled by the same command // handler impl, but use distinct acvptool command names, and so are // registered twice with the same digest: once under "SHAKE-xxx" for AFT, @@ -363,6 +384,10 @@ func processingLoop(reader io.Reader, writer io.Writer) error { return fmt.Errorf("reading request: %w", err) } + if entropyTesting && strings.HasPrefix(req.name, "SHA2-384") { + req.name = fmt.Sprintf("%s/ENTROPY", req.name) + } + cmd, exists := commands[req.name] if !exists { return fmt.Errorf("unknown command: %q", req.name) @@ -460,9 +485,16 @@ func writeResponse(writer io.Writer, args [][]byte) error { // which takes no arguments and returns a single byte string // which is a JSON blob of ACVP algorithm configuration." func cmdGetConfig() command { + // If GOENTROPYSOURCEACVP is set, then use the entropyCapabilitiesJson + // instead of capabilitiesJson. + capabilities := [][]byte{capabilitiesJson} + if entropyTesting { + capabilities = [][]byte{entropyCapabilitiesJson} + } + return command{ handler: func(args [][]byte) ([][]byte, error) { - return [][]byte{capabilitiesJson}, nil + return capabilities, nil }, } } @@ -533,6 +565,46 @@ func cmdHashMct(h hash.Hash) command { } } +// cmdEntropyHashEntropySha384Aft returns a command handler that tests the +// entropy package's SHA2-384 digest for AFT inputs. +func cmdEntropyHashEntropySha384Aft() command { + return command{ + requiredArgs: 1, // Message to hash. + handler: func(args [][]byte) ([][]byte, error) { + digest := entropy.TestingOnlySHA384(args[0]) + return [][]byte{digest[:]}, nil + }, + } +} + +// cmdEntropyHashEntropySha384Mct returns a command handler that tests the +// entropy package's SHA2-384 digest for MCT inputs. +func cmdEntropyHashEntropySha384Mct() command { + return command{ + requiredArgs: 1, // Seed message. + handler: func(args [][]byte) ([][]byte, error) { + hSize := 48 + seed := args[0] + + digest := make([]byte, 0, hSize) + buf := make([]byte, 0, 3*hSize) + buf = append(buf, seed...) + buf = append(buf, seed...) + buf = append(buf, seed...) + + for i := 0; i < 1000; i++ { + digestRaw := entropy.TestingOnlySHA384(buf) + digest = digestRaw[:hSize] + + copy(buf, buf[hSize:]) + copy(buf[2*hSize:], digest) + } + + return [][]byte{buf[hSize*2:]}, nil + }, + } +} + // cmdSha3Mct returns a command handler for the specified hash // algorithm for SHA-3 monte carlo test (MCT) test cases. 
// diff --git a/src/crypto/internal/fips140test/entropy_test.go b/src/crypto/internal/fips140test/entropy_test.go index 76c24289520..82d921ffb73 100644 --- a/src/crypto/internal/fips140test/entropy_test.go +++ b/src/crypto/internal/fips140test/entropy_test.go @@ -9,8 +9,9 @@ package fipstest import ( "bytes" "crypto/internal/cryptotest" + entropy "crypto/internal/entropy/v1.0.0" "crypto/internal/fips140/drbg" - "crypto/internal/fips140/entropy" + "crypto/rand" "crypto/sha256" "crypto/sha512" "encoding/hex" @@ -31,13 +32,19 @@ var flagNISTSP80090B = flag.Bool("nist-sp800-90b", false, "run NIST SP 800-90B t func TestEntropySamples(t *testing.T) { cryptotest.MustSupportFIPS140(t) + now := time.Now().UTC() - var seqSamples [1_000_000]uint8 - samplesOrTryAgain(t, seqSamples[:]) - seqSamplesName := fmt.Sprintf("entropy_samples_sequential_%s_%s_%s_%s_%s.bin", entropy.Version(), - runtime.GOOS, runtime.GOARCH, *flagEntropySamples, time.Now().Format("20060102T150405Z")) + seqSampleCount := 1_000_000 if *flagEntropySamples != "" { - if err := os.WriteFile(seqSamplesName, seqSamples[:], 0644); err != nil { + // The lab requested 300 million samples for a new heuristic procedure. + seqSampleCount = 300_000_000 + } + seqSamples := make([]uint8, seqSampleCount) + samplesOrTryAgain(t, seqSamples) + seqSamplesName := fmt.Sprintf("entropy_samples_sequential_%s_%s_%s_%s_%s.bin", entropy.Version(), + runtime.GOOS, runtime.GOARCH, *flagEntropySamples, now.Format("20060102T150405Z")) + if *flagEntropySamples != "" { + if err := os.WriteFile(seqSamplesName, seqSamples, 0644); err != nil { t.Fatalf("failed to write samples to %q: %v", seqSamplesName, err) } t.Logf("wrote %s", seqSamplesName) @@ -50,7 +57,7 @@ func TestEntropySamples(t *testing.T) { copy(restartSamples[i][:], samples[:]) } restartSamplesName := fmt.Sprintf("entropy_samples_restart_%s_%s_%s_%s_%s.bin", entropy.Version(), - runtime.GOOS, runtime.GOARCH, *flagEntropySamples, time.Now().Format("20060102T150405Z")) + runtime.GOOS, runtime.GOARCH, *flagEntropySamples, now.Format("20060102T150405Z")) if *flagEntropySamples != "" { f, err := os.Create(restartSamplesName) if err != nil { @@ -158,6 +165,16 @@ func TestEntropySHA384(t *testing.T) { if got != want { t.Errorf("SHA384() = %x, want %x", got, want) } + + for l := range 1024*3 + 1 { + input := make([]byte, l) + rand.Read(input) + want := sha512.Sum384(input) + got := entropy.TestingOnlySHA384(input) + if got != want { + t.Errorf("TestingOnlySHA384(%d bytes) = %x, want %x", l, got, want) + } + } } func TestEntropyRepetitionCountTest(t *testing.T) { @@ -205,7 +222,7 @@ func TestEntropyUnchanged(t *testing.T) { testenv.MustHaveSource(t) h := sha256.New() - root := os.DirFS("../fips140/entropy") + root := os.DirFS("../entropy/v1.0.0") if err := fs.WalkDir(root, ".", func(path string, d fs.DirEntry, err error) error { if err != nil { return err @@ -225,24 +242,24 @@ func TestEntropyUnchanged(t *testing.T) { t.Fatalf("WalkDir: %v", err) } - // The crypto/internal/fips140/entropy package is certified as a FIPS 140-3 + // The crypto/internal/entropy/v1.0.0 package is certified as a FIPS 140-3 // entropy source through the Entropy Source Validation program, // independently of the FIPS 140-3 module. It must not change even across // FIPS 140-3 module versions, in order to reuse the ESV certificate. 
- exp := "35976eb8a11678c79777da07aaab5511d4325701f837777df205f6e7b20c6821" + exp := "2541273241ae8aafe55026328354ed3799df1e2fb308b2097833203a42911b53" if got := hex.EncodeToString(h.Sum(nil)); got != exp { - t.Errorf("hash of crypto/internal/fips140/entropy = %s, want %s", got, exp) + t.Errorf("hash of crypto/internal/entropy/v1.0.0 = %s, want %s", got, exp) } } func TestEntropyRace(t *testing.T) { // Check that concurrent calls to Seed don't trigger the race detector. - for range 2 { + for range 16 { go func() { _, _ = entropy.Seed(&memory) }() } - // Same, with the higher-level DRBG. More concurrent calls to hit the Pool. + // Same, with the higher-level DRBG. for range 16 { go func() { var b [64]byte diff --git a/src/crypto/internal/fips140test/nistec_test.go b/src/crypto/internal/fips140test/nistec_test.go index 3b3de2bc2cb..9b4b7cba7fa 100644 --- a/src/crypto/internal/fips140test/nistec_test.go +++ b/src/crypto/internal/fips140test/nistec_test.go @@ -18,7 +18,7 @@ import ( func TestNISTECAllocations(t *testing.T) { cryptotest.SkipTestAllocations(t) t.Run("P224", func(t *testing.T) { - if allocs := testing.AllocsPerRun(10, func() { + if allocs := testing.AllocsPerRun(100, func() { p := nistec.NewP224Point().SetGenerator() scalar := make([]byte, 28) rand.Read(scalar) @@ -37,7 +37,7 @@ func TestNISTECAllocations(t *testing.T) { } }) t.Run("P256", func(t *testing.T) { - if allocs := testing.AllocsPerRun(10, func() { + if allocs := testing.AllocsPerRun(100, func() { p := nistec.NewP256Point().SetGenerator() scalar := make([]byte, 32) rand.Read(scalar) @@ -56,7 +56,7 @@ func TestNISTECAllocations(t *testing.T) { } }) t.Run("P384", func(t *testing.T) { - if allocs := testing.AllocsPerRun(10, func() { + if allocs := testing.AllocsPerRun(100, func() { p := nistec.NewP384Point().SetGenerator() scalar := make([]byte, 48) rand.Read(scalar) @@ -75,7 +75,7 @@ func TestNISTECAllocations(t *testing.T) { } }) t.Run("P521", func(t *testing.T) { - if allocs := testing.AllocsPerRun(10, func() { + if allocs := testing.AllocsPerRun(100, func() { p := nistec.NewP521Point().SetGenerator() scalar := make([]byte, 66) rand.Read(scalar) diff --git a/src/crypto/internal/fips140test/xaes_test.go b/src/crypto/internal/fips140test/xaes_test.go index 9406bfab7bf..c852832fcea 100644 --- a/src/crypto/internal/fips140test/xaes_test.go +++ b/src/crypto/internal/fips140test/xaes_test.go @@ -21,7 +21,7 @@ func TestXAESAllocations(t *testing.T) { t.Skip("Test reports non-zero allocation count. See issue #70448") } cryptotest.SkipTestAllocations(t) - if allocs := testing.AllocsPerRun(10, func() { + if allocs := testing.AllocsPerRun(100, func() { key := make([]byte, 32) nonce := make([]byte, 24) plaintext := make([]byte, 16) diff --git a/src/crypto/pbkdf2/pbkdf2.go b/src/crypto/pbkdf2/pbkdf2.go index dff2e6cb355..01fd12e40e3 100644 --- a/src/crypto/pbkdf2/pbkdf2.go +++ b/src/crypto/pbkdf2/pbkdf2.go @@ -27,7 +27,7 @@ import ( // can get a derived key for e.g. AES-256 (which needs a 32-byte key) by // doing: // -// dk := pbkdf2.Key(sha1.New, "some password", salt, 4096, 32) +// dk, err := pbkdf2.Key(sha1.New, "some password", salt, 4096, 32) // // Remember to get a good random salt. At least 8 bytes is recommended by the // RFC. 
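The doc fix above tracks crypto/pbkdf2's signature, whose Key returns an error alongside the derived key. A minimal usage sketch under that signature, substituting SHA-256 for the SHA-1 in the package example (the exact failure conditions, e.g. FIPS 140-3 parameter checks, depend on the build mode):

    package main

    import (
    	"crypto/pbkdf2"
    	"crypto/rand"
    	"crypto/sha256"
    	"fmt"
    )

    func main() {
    	// A random per-password salt; the RFC recommends at least 8 bytes.
    	salt := make([]byte, 16)
    	if _, err := rand.Read(salt); err != nil {
    		panic(err)
    	}
    	// Key returns (key, error), which is why the documented example
    	// now assigns two values.
    	dk, err := pbkdf2.Key(sha256.New, "some password", salt, 4096, 32)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("derived key: %x\n", dk)
    }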
diff --git a/src/crypto/rsa/rsa_test.go b/src/crypto/rsa/rsa_test.go
index 0d5b6e08f0e..b9e85bd8ff8 100644
--- a/src/crypto/rsa/rsa_test.go
+++ b/src/crypto/rsa/rsa_test.go
@@ -212,7 +212,6 @@ func TestEverything(t *testing.T) {
 		max = 2048
 	}
 	for size := min; size <= max; size++ {
-		size := size
 		t.Run(fmt.Sprintf("%d", size), func(t *testing.T) {
 			t.Parallel()
 			priv, err := GenerateKey(rand.Reader, size)
diff --git a/src/crypto/sha256/sha256_test.go b/src/crypto/sha256/sha256_test.go
index 11b24db7d6b..a18a536ba28 100644
--- a/src/crypto/sha256/sha256_test.go
+++ b/src/crypto/sha256/sha256_test.go
@@ -471,3 +471,17 @@ func BenchmarkHash256K(b *testing.B) {
 func BenchmarkHash1M(b *testing.B) {
 	benchmarkSize(b, 1024*1024)
 }
+
+func TestAllocationsWithTypeAsserts(t *testing.T) {
+	cryptotest.SkipTestAllocations(t)
+	allocs := testing.AllocsPerRun(100, func() {
+		h := New()
+		h.Write([]byte{1, 2, 3})
+		marshaled, _ := h.(encoding.BinaryMarshaler).MarshalBinary()
+		marshaled, _ = h.(encoding.BinaryAppender).AppendBinary(marshaled[:0])
+		h.(encoding.BinaryUnmarshaler).UnmarshalBinary(marshaled)
+	})
+	if allocs != 0 {
+		t.Fatalf("allocs = %v; want = 0", allocs)
+	}
+}
diff --git a/src/crypto/subtle/constant_time.go b/src/crypto/subtle/constant_time.go
index 22c1c64a0da..14c911101b0 100644
--- a/src/crypto/subtle/constant_time.go
+++ b/src/crypto/subtle/constant_time.go
@@ -6,7 +6,13 @@
 // code but require careful thought to use correctly.
 package subtle
 
-import "crypto/internal/fips140/subtle"
+import (
+	"crypto/internal/constanttime"
+	"crypto/internal/fips140/subtle"
+)
+
+// These functions are forwarded to crypto/internal/constanttime for intrinsified
+// operations, and to crypto/internal/fips140/subtle for byte slice operations.
 
 // ConstantTimeCompare returns 1 if the two slices, x and y, have equal contents
 // and 0 otherwise. The time taken is a function of the length of the slices and
@@ -19,17 +25,17 @@ func ConstantTimeCompare(x, y []byte) int {
 // ConstantTimeSelect returns x if v == 1 and y if v == 0.
 // Its behavior is undefined if v takes any other value.
 func ConstantTimeSelect(v, x, y int) int {
-	return subtle.ConstantTimeSelect(v, x, y)
+	return constanttime.Select(v, x, y)
 }
 
 // ConstantTimeByteEq returns 1 if x == y and 0 otherwise.
 func ConstantTimeByteEq(x, y uint8) int {
-	return subtle.ConstantTimeByteEq(x, y)
+	return constanttime.ByteEq(x, y)
 }
 
 // ConstantTimeEq returns 1 if x == y and 0 otherwise.
 func ConstantTimeEq(x, y int32) int {
-	return subtle.ConstantTimeEq(x, y)
+	return constanttime.Eq(x, y)
 }
 
 // ConstantTimeCopy copies the contents of y into x (a slice of equal length)
@@ -42,5 +48,5 @@ func ConstantTimeCopy(v int, x, y []byte) {
 
 // ConstantTimeLessOrEq returns 1 if x <= y and 0 otherwise.
 // Its behavior is undefined if x or y are negative or > 2**31 - 1.
func ConstantTimeLessOrEq(x, y int) int { - return subtle.ConstantTimeLessOrEq(x, y) + return constanttime.LessOrEq(x, y) } diff --git a/src/crypto/subtle/constant_time_test.go b/src/crypto/subtle/constant_time_test.go index c2ccd28ad70..9db1140134f 100644 --- a/src/crypto/subtle/constant_time_test.go +++ b/src/crypto/subtle/constant_time_test.go @@ -128,6 +128,17 @@ func TestConstantTimeLessOrEq(t *testing.T) { var benchmarkGlobal uint8 +func BenchmarkConstantTimeSelect(b *testing.B) { + x := int(benchmarkGlobal) + var y, z int + + for range b.N { + y, z, x = ConstantTimeSelect(x, y, z), y, z + } + + benchmarkGlobal = uint8(x) +} + func BenchmarkConstantTimeByteEq(b *testing.B) { var x, y uint8 diff --git a/src/crypto/tls/bogo_shim_test.go b/src/crypto/tls/bogo_shim_test.go index 7cab568db80..8f171d92595 100644 --- a/src/crypto/tls/bogo_shim_test.go +++ b/src/crypto/tls/bogo_shim_test.go @@ -11,8 +11,10 @@ import ( "encoding/base64" "encoding/json" "encoding/pem" + "errors" "flag" "fmt" + "html/template" "internal/byteorder" "internal/testenv" "io" @@ -25,10 +27,13 @@ import ( "strconv" "strings" "testing" + "time" "golang.org/x/crypto/cryptobyte" ) +const boringsslModVer = "v0.0.0-20250620172916-f51d8b099832" + var ( port = flag.String("port", "", "") server = flag.Bool("server", false, "") @@ -537,6 +542,7 @@ func orderlyShutdown(tlsConn *Conn) { } func TestBogoSuite(t *testing.T) { + testenv.MustHaveGoBuild(t) if testing.Short() { t.Skip("skipping in short mode") } @@ -555,9 +561,9 @@ func TestBogoSuite(t *testing.T) { var bogoDir string if *bogoLocalDir != "" { + ensureLocalBogo(t, *bogoLocalDir) bogoDir = *bogoLocalDir } else { - const boringsslModVer = "v0.0.0-20250620172916-f51d8b099832" bogoDir = cryptotest.FetchModule(t, "boringssl.googlesource.com/boringssl.git", boringsslModVer) } @@ -606,6 +612,12 @@ func TestBogoSuite(t *testing.T) { t.Fatalf("failed to parse results JSON: %s", err) } + if *bogoReport != "" { + if err := generateReport(results, *bogoReport); err != nil { + t.Fatalf("failed to generate report: %v", err) + } + } + // assertResults contains test results we want to make sure // are present in the output. They are only checked if -bogo-filter // was not passed. @@ -655,6 +667,66 @@ func TestBogoSuite(t *testing.T) { } } +// ensureLocalBogo fetches BoringSSL to localBogoDir at the correct revision +// (from boringsslModVer) if localBogoDir doesn't already exist. +// +// If localBogoDir does exist, ensureLocalBogo fails the test if it isn't +// a directory. 
+func ensureLocalBogo(t *testing.T, localBogoDir string) { + t.Helper() + + if stat, err := os.Stat(localBogoDir); err == nil { + if !stat.IsDir() { + t.Fatalf("local bogo dir (%q) exists but is not a directory", localBogoDir) + } + + t.Logf("using local bogo checkout from %q", localBogoDir) + return + } else if !errors.Is(err, os.ErrNotExist) { + t.Fatalf("failed to stat local bogo dir (%q): %v", localBogoDir, err) + } + + testenv.MustHaveExecPath(t, "git") + + idx := strings.LastIndex(boringsslModVer, "-") + if idx == -1 || idx == len(boringsslModVer)-1 { + t.Fatalf("invalid boringsslModVer format: %q", boringsslModVer) + } + commitSHA := boringsslModVer[idx+1:] + + t.Logf("cloning boringssl@%s to %q", commitSHA, localBogoDir) + cloneCmd := testenv.Command(t, "git", "clone", "--no-checkout", "https://boringssl.googlesource.com/boringssl", localBogoDir) + if err := cloneCmd.Run(); err != nil { + t.Fatalf("git clone failed: %v", err) + } + + checkoutCmd := testenv.Command(t, "git", "checkout", commitSHA) + checkoutCmd.Dir = localBogoDir + if err := checkoutCmd.Run(); err != nil { + t.Fatalf("git checkout failed: %v", err) + } + + t.Logf("using fresh local bogo checkout from %q", localBogoDir) + return +} + +func generateReport(results bogoResults, outPath string) error { + data := reportData{ + Results: results, + Timestamp: time.Unix(int64(results.SecondsSinceEpoch), 0).Format("2006-01-02 15:04:05"), + Revision: boringsslModVer, + } + + tmpl := template.Must(template.New("report").Parse(reportTemplate)) + file, err := os.Create(outPath) + if err != nil { + return err + } + defer file.Close() + + return tmpl.Execute(file, data) +} + // bogoResults is a copy of boringssl.googlesource.com/boringssl/testresults.Results type bogoResults struct { Version int `json:"version"` @@ -669,3 +741,127 @@ type bogoResults struct { Error string `json:"error,omitempty"` } `json:"tests"` } + +type reportData struct { + Results bogoResults + SkipReasons map[string]string + Timestamp string + Revision string +} + +const reportTemplate = ` + + + + BoGo Results Report + + + +
+<h1>BoGo Results Report</h1>
+
+<p>
+Generated: {{.Timestamp}} | BoGo Revision: {{.Revision}}
+{{range $status, $count := .Results.NumFailuresByType}}
+{{$status}}: {{$count}} |
+{{end}}
+</p>
+
+<table>
+<tr>
+<th>Test Name</th>
+<th>Status</th>
+<th>Actual</th>
+<th>Expected</th>
+<th>Error</th>
+</tr>
+{{range $name, $test := .Results.Tests}}
+<tr>
+<td>{{$name}}</td>
+<td>{{$test.Actual}}</td>
+<td>{{$test.Actual}}</td>
+<td>{{$test.Expected}}</td>
+<td>{{$test.Error}}</td>
+</tr>
+{{end}}
+</table>
    + + + + +` diff --git a/src/crypto/tls/conn.go b/src/crypto/tls/conn.go index 09dc9ea94c9..2de120a1329 100644 --- a/src/crypto/tls/conn.go +++ b/src/crypto/tls/conn.go @@ -1578,9 +1578,9 @@ func (c *Conn) handshakeContext(ctx context.Context) (ret error) { // the handshake (RFC 9001, Section 5.7). c.quicSetReadSecret(QUICEncryptionLevelApplication, c.cipherSuite, c.in.trafficSecret) } else { - var a alert c.out.Lock() - if !errors.As(c.out.err, &a) { + a, ok := errors.AsType[alert](c.out.err) + if !ok { a = alertInternalError } c.out.Unlock() diff --git a/src/crypto/tls/fips140_test.go b/src/crypto/tls/fips140_test.go index d3fa61dc97d..291a19f44cd 100644 --- a/src/crypto/tls/fips140_test.go +++ b/src/crypto/tls/fips140_test.go @@ -404,7 +404,7 @@ func TestFIPSCertAlgs(t *testing.T) { L2_I := fipsCert(t, "L2_I", fipsRSAKey(t, 1024), I_R1, fipsCertLeaf) // client verifying server cert - testServerCert := func(t *testing.T, desc string, pool *x509.CertPool, key interface{}, list [][]byte, ok bool) { + testServerCert := func(t *testing.T, desc string, pool *x509.CertPool, key any, list [][]byte, ok bool) { clientConfig := testConfig.Clone() clientConfig.RootCAs = pool clientConfig.InsecureSkipVerify = false @@ -432,7 +432,7 @@ func TestFIPSCertAlgs(t *testing.T) { } // server verifying client cert - testClientCert := func(t *testing.T, desc string, pool *x509.CertPool, key interface{}, list [][]byte, ok bool) { + testClientCert := func(t *testing.T, desc string, pool *x509.CertPool, key any, list [][]byte, ok bool) { clientConfig := testConfig.Clone() clientConfig.ServerName = "example.com" clientConfig.Certificates = []Certificate{{Certificate: list, PrivateKey: key}} @@ -574,11 +574,11 @@ type fipsCertificate struct { parentOrg string der []byte cert *x509.Certificate - key interface{} + key any fipsOK bool } -func fipsCert(t *testing.T, name string, key interface{}, parent *fipsCertificate, mode int) *fipsCertificate { +func fipsCert(t *testing.T, name string, key any, parent *fipsCertificate, mode int) *fipsCertificate { org := name parentOrg := "" if i := strings.Index(org, "_"); i >= 0 { @@ -605,7 +605,7 @@ func fipsCert(t *testing.T, name string, key interface{}, parent *fipsCertificat } var pcert *x509.Certificate - var pkey interface{} + var pkey any if parent != nil { pcert = parent.cert pkey = parent.key @@ -614,7 +614,7 @@ func fipsCert(t *testing.T, name string, key interface{}, parent *fipsCertificat pkey = key } - var pub interface{} + var pub any var desc string switch k := key.(type) { case *rsa.PrivateKey: diff --git a/src/crypto/tls/handshake_client_test.go b/src/crypto/tls/handshake_client_test.go index 9c94016f133..6020c0f055c 100644 --- a/src/crypto/tls/handshake_client_test.go +++ b/src/crypto/tls/handshake_client_test.go @@ -2693,7 +2693,6 @@ func TestTLS13OnlyClientHelloCipherSuite(t *testing.T) { }, } for _, tt := range tls13Tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() testTLS13OnlyClientHelloCipherSuite(t, tt.ciphers) diff --git a/src/crypto/tls/handshake_server.go b/src/crypto/tls/handshake_server.go index 088c66fadb2..a2cf176a86c 100644 --- a/src/crypto/tls/handshake_server.go +++ b/src/crypto/tls/handshake_server.go @@ -965,10 +965,9 @@ func (c *Conn) processCertsFromClient(certificate Certificate) error { chains, err := certs[0].Verify(opts) if err != nil { - var errCertificateInvalid x509.CertificateInvalidError - if errors.As(err, &x509.UnknownAuthorityError{}) { + if _, ok := errors.AsType[x509.UnknownAuthorityError](err); ok { 
 			c.sendAlert(alertUnknownCA)
-		} else if errors.As(err, &errCertificateInvalid) && errCertificateInvalid.Reason == x509.Expired {
+		} else if errCertificateInvalid, ok := errors.AsType[x509.CertificateInvalidError](err); ok && errCertificateInvalid.Reason == x509.Expired {
 			c.sendAlert(alertCertificateExpired)
 		} else {
 			c.sendAlert(alertBadCertificate)
 		}
diff --git a/src/crypto/tls/handshake_server_test.go b/src/crypto/tls/handshake_server_test.go
index 941f2a3373f..43183db2a19 100644
--- a/src/crypto/tls/handshake_server_test.go
+++ b/src/crypto/tls/handshake_server_test.go
@@ -403,8 +403,7 @@ func TestAlertForwarding(t *testing.T) {
 	err := Server(s, testConfig).Handshake()
 	s.Close()
 
-	var opErr *net.OpError
-	if !errors.As(err, &opErr) || opErr.Err != error(alertUnknownCA) {
+	if opErr, ok := errors.AsType[*net.OpError](err); !ok || opErr.Err != error(alertUnknownCA) {
 		t.Errorf("Got error: %s; expected: %s", err, error(alertUnknownCA))
 	}
 }
diff --git a/src/crypto/tls/handshake_test.go b/src/crypto/tls/handshake_test.go
index ea8ac6fc837..3e2c5663087 100644
--- a/src/crypto/tls/handshake_test.go
+++ b/src/crypto/tls/handshake_test.go
@@ -46,7 +46,9 @@ var (
 	keyFile      = flag.String("keylog", "", "destination file for KeyLogWriter")
 	bogoMode     = flag.Bool("bogo-mode", false, "Enabled bogo shim mode, ignore everything else")
 	bogoFilter   = flag.String("bogo-filter", "", "BoGo test filter")
-	bogoLocalDir = flag.String("bogo-local-dir", "", "Local BoGo to use, instead of fetching from source")
+	bogoLocalDir = flag.String("bogo-local-dir", "",
+		"If this directory does not exist, check BoGo out into it; otherwise use it as a pre-existing checkout")
+	bogoReport = flag.String("bogo-html-report", "", "File path to render an HTML report with BoGo results")
 )
 
 func runTestAndUpdateIfNeeded(t *testing.T, name string, run func(t *testing.T, update bool), wait bool) {
diff --git a/src/crypto/tls/quic.go b/src/crypto/tls/quic.go
index 3be479eb12f..2ba2242b2d9 100644
--- a/src/crypto/tls/quic.go
+++ b/src/crypto/tls/quic.go
@@ -362,12 +362,11 @@ func quicError(err error) error {
 	if err == nil {
 		return nil
 	}
-	var ae AlertError
-	if errors.As(err, &ae) {
+	if _, ok := errors.AsType[AlertError](err); ok {
 		return err
 	}
-	var a alert
-	if !errors.As(err, &a) {
+	a, ok := errors.AsType[alert](err)
+	if !ok {
 		a = alertInternalError
 	}
 	// Return an error wrapping the original error and an AlertError.
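The hunks in this file and the surrounding TLS files migrate from errors.As with a pointer out-parameter to the generic errors.AsType. A minimal sketch of the two patterns side by side, assuming a toolchain that provides errors.AsType[E error](err) (E, bool) in the form this change relies on:

    package main

    import (
    	"errors"
    	"fmt"
    	"io/fs"
    	"os"
    )

    func main() {
    	_, err := os.Open("does-not-exist.txt")

    	// Old pattern: pre-declare the target and pass its address.
    	var pathErr *fs.PathError
    	if errors.As(err, &pathErr) {
    		fmt.Println("As:", pathErr.Path)
    	}

    	// New pattern: the type parameter replaces the out-parameter,
    	// scoping the target to the if statement.
    	if pathErr, ok := errors.AsType[*fs.PathError](err); ok {
    		fmt.Println("AsType:", pathErr.Path)
    	}
    }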
diff --git a/src/crypto/tls/quic_test.go b/src/crypto/tls/quic_test.go index f6e8c55d9d6..5f4b2b7707d 100644 --- a/src/crypto/tls/quic_test.go +++ b/src/crypto/tls/quic_test.go @@ -368,8 +368,7 @@ func TestQUICHandshakeError(t *testing.T) { if !errors.Is(err, AlertError(alertBadCertificate)) { t.Errorf("connection handshake terminated with error %q, want alertBadCertificate", err) } - var e *CertificateVerificationError - if !errors.As(err, &e) { + if _, ok := errors.AsType[*CertificateVerificationError](err); !ok { t.Errorf("connection handshake terminated with error %q, want CertificateVerificationError", err) } } diff --git a/src/crypto/tls/tls_test.go b/src/crypto/tls/tls_test.go index bfcc62ccfb8..6539009df62 100644 --- a/src/crypto/tls/tls_test.go +++ b/src/crypto/tls/tls_test.go @@ -1880,7 +1880,6 @@ func testVerifyCertificates(t *testing.T, version uint16) { rootCAs.AddCert(issuer) for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { t.Parallel() diff --git a/src/crypto/x509/bettertls_test.go b/src/crypto/x509/bettertls_test.go new file mode 100644 index 00000000000..3a0b98cd74c --- /dev/null +++ b/src/crypto/x509/bettertls_test.go @@ -0,0 +1,229 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This test uses Netflix's BetterTLS test suite to test the crypto/x509 +// path building and name constraint validation. +// +// The test data in JSON form is around 31MB, so we fetch the BetterTLS +// go module and use it to generate the JSON data on-the-fly in a tmp dir. +// +// For more information, see: +// https://github.com/netflix/bettertls +// https://netflixtechblog.com/bettertls-c9915cd255c0 + +package x509 + +import ( + "crypto/internal/cryptotest" + "encoding/base64" + "encoding/json" + "internal/testenv" + "os" + "path/filepath" + "testing" +) + +// TestBetterTLS runs the "pathbuilding" and "nameconstraints" suites of +// BetterTLS. +// +// The test cases in the pathbuilding suite are designed to test edge-cases +// for path building and validation. In particular, the ["chain of pain"][0] +// scenario where a validator treats path building as an operation with +// a single possible outcome, instead of many. +// +// The test cases in the nameconstraints suite are designed to test edge-cases +// for name constraint parsing and validation. 
+// +// [0]: https://medium.com/@sleevi_/path-building-vs-path-verifying-the-chain-of-pain-9fbab861d7d6 +func TestBetterTLS(t *testing.T) { + testenv.SkipIfShortAndSlow(t) + + data, roots := betterTLSTestData(t) + + for _, suite := range []string{"pathbuilding", "nameconstraints"} { + t.Run(suite, func(t *testing.T) { + runTestSuite(t, suite, &data, roots) + }) + } +} + +func runTestSuite(t *testing.T, suiteName string, data *betterTLS, roots *CertPool) { + suite, exists := data.Suites[suiteName] + if !exists { + t.Fatalf("missing %s suite", suiteName) + } + + t.Logf( + "running %s test suite with %d test cases", + suiteName, len(suite.TestCases)) + + for _, tc := range suite.TestCases { + t.Logf("testing %s test case %d", suiteName, tc.ID) + + certsDER, err := tc.Certs() + if err != nil { + t.Fatalf( + "failed to decode certificates for test case %d: %v", + tc.ID, err) + } + + if len(certsDER) == 0 { + t.Fatalf("test case %d has no certificates", tc.ID) + } + + eeCert, err := ParseCertificate(certsDER[0]) + if err != nil { + // Several constraint test cases contain invalid end-entity + // certificate extensions that we reject ahead of verification + // time. We consider this a pass and skip further processing. + // + // For example, a SAN with a uniformResourceIdentifier general name + // containing the value `"http://foo.bar, DNS:test.localhost"`, or + // an iPAddress general name of the wrong length. + if suiteName == "nameconstraints" && tc.Expected == expectedReject { + t.Logf( + "skipping expected reject test case %d "+ + "- end entity certificate parse error: %v", + tc.ID, err) + continue + } + t.Fatalf( + "failed to parse end entity certificate for test case %d: %v", + tc.ID, err) + } + + intermediates := NewCertPool() + for i, certDER := range certsDER[1:] { + cert, err := ParseCertificate(certDER) + if err != nil { + t.Fatalf( + "failed to parse intermediate certificate %d for test case %d: %v", + i+1, tc.ID, err) + } + intermediates.AddCert(cert) + } + + _, err = eeCert.Verify(VerifyOptions{ + Roots: roots, + Intermediates: intermediates, + DNSName: tc.Hostname, + KeyUsages: []ExtKeyUsage{ExtKeyUsageServerAuth}, + }) + + switch tc.Expected { + case expectedAccept: + if err != nil { + t.Errorf( + "test case %d failed: expected success, got error: %v", + tc.ID, err) + } + case expectedReject: + if err == nil { + t.Errorf( + "test case %d failed: expected failure, but verification succeeded", + tc.ID) + } + default: + t.Fatalf( + "test case %d failed: unknown expected result: %s", + tc.ID, tc.Expected) + } + } +} + +func betterTLSTestData(t *testing.T) (betterTLS, *CertPool) { + const ( + bettertlsModule = "github.com/Netflix/bettertls" + bettertlsVersion = "v0.0.0-20250909192348-e1e99e353074" + ) + + bettertlsDir := cryptotest.FetchModule(t, bettertlsModule, bettertlsVersion) + + tempDir := t.TempDir() + testsJSONPath := filepath.Join(tempDir, "tests.json") + + cmd := testenv.Command(t, testenv.GoToolPath(t), + "run", "./test-suites/cmd/bettertls", + "export-tests", + "--out", testsJSONPath) + cmd.Dir = bettertlsDir + + t.Log("running bettertls export-tests command") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf( + "failed to run bettertls export-tests: %v\nOutput: %s", + err, output) + } + + jsonData, err := os.ReadFile(testsJSONPath) + if err != nil { + t.Fatalf("failed to read exported tests.json: %v", err) + } + + t.Logf("successfully loaded tests.json at %s", testsJSONPath) + + var data betterTLS + if err := json.Unmarshal(jsonData, &data); err != nil { + 
t.Fatalf("failed to unmarshal JSON data: %v", err) + } + + t.Logf("testing betterTLS revision: %s", data.Revision) + t.Logf("number of test suites: %d", len(data.Suites)) + + rootDER, err := data.RootCert() + if err != nil { + t.Fatalf("failed to decode trust root: %v", err) + } + + rootCert, err := ParseCertificate(rootDER) + if err != nil { + t.Fatalf("failed to parse trust root certificate: %v", err) + } + + roots := NewCertPool() + roots.AddCert(rootCert) + + return data, roots +} + +type betterTLS struct { + Revision string `json:"betterTlsRevision"` + Root string `json:"trustRoot"` + Suites map[string]betterTLSSuite `json:"suites"` +} + +func (b *betterTLS) RootCert() ([]byte, error) { + return base64.StdEncoding.DecodeString(b.Root) +} + +type betterTLSSuite struct { + TestCases []betterTLSTest `json:"testCases"` +} + +type betterTLSTest struct { + ID uint32 `json:"id"` + Certificates []string `json:"certificates"` + Hostname string `json:"hostname"` + Expected expectedResult `json:"expected"` +} + +func (test *betterTLSTest) Certs() ([][]byte, error) { + certs := make([][]byte, len(test.Certificates)) + for i, cert := range test.Certificates { + decoded, err := base64.StdEncoding.DecodeString(cert) + if err != nil { + return nil, err + } + certs[i] = decoded + } + return certs, nil +} + +type expectedResult string + +const ( + expectedAccept expectedResult = "ACCEPT" + expectedReject expectedResult = "REJECT" +) diff --git a/src/crypto/x509/oid.go b/src/crypto/x509/oid.go index b1464346b6b..c60daa7540c 100644 --- a/src/crypto/x509/oid.go +++ b/src/crypto/x509/oid.go @@ -286,7 +286,7 @@ func (oid OID) EqualASN1OID(other asn1.ObjectIdentifier) bool { return i == len(other) } -// Strings returns the string representation of the Object Identifier. +// String returns the string representation of the Object Identifier. func (oid OID) String() string { var b strings.Builder b.Grow(32) diff --git a/src/crypto/x509/parser.go b/src/crypto/x509/parser.go index 4abcc1b7b59..680dcee203a 100644 --- a/src/crypto/x509/parser.go +++ b/src/crypto/x509/parser.go @@ -429,10 +429,8 @@ func parseSANExtension(der cryptobyte.String) (dnsNames, emailAddresses []string if err != nil { return fmt.Errorf("x509: cannot parse URI %q: %s", uriStr, err) } - if len(uri.Host) > 0 { - if _, ok := domainToReverseLabels(uri.Host); !ok { - return fmt.Errorf("x509: cannot parse URI %q: invalid domain", uriStr) - } + if len(uri.Host) > 0 && !domainNameValid(uri.Host, false) { + return fmt.Errorf("x509: cannot parse URI %q: invalid domain", uriStr) } uris = append(uris, uri) case nameTypeIP: @@ -598,15 +596,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error()) } - trimmedDomain := domain - if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' { - // constraints can have a leading - // period to exclude the domain - // itself, but that's not valid in a - // normal domain name. - trimmedDomain = trimmedDomain[1:] - } - if _, ok := domainToReverseLabels(trimmedDomain); !ok { + if !domainNameValid(domain, true) { return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse dnsName constraint %q", domain) } dnsNames = append(dnsNames, domain) @@ -647,12 +637,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint) } } else { - // Otherwise it's a domain name. 
-				domain := constraint
-				if len(domain) > 0 && domain[0] == '.' {
-					domain = domain[1:]
-				}
-				if _, ok := domainToReverseLabels(domain); !ok {
+				if !domainNameValid(constraint, true) {
 					return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint)
 				}
 			}
@@ -668,15 +653,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle
 				return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q: cannot be IP address", domain)
 			}
 
-			trimmedDomain := domain
-			if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' {
-				// constraints can have a leading
-				// period to exclude the domain itself,
-				// but that's not valid in a normal
-				// domain name.
-				trimmedDomain = trimmedDomain[1:]
-			}
-			if _, ok := domainToReverseLabels(trimmedDomain); !ok {
+			if !domainNameValid(domain, true) {
 				return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q", domain)
 			}
 			uriDomains = append(uriDomains, domain)
@@ -1317,3 +1294,62 @@ func ParseRevocationList(der []byte) (*RevocationList, error) {
 
 	return rl, nil
 }
+
+// domainNameValid is an alloc-less version of the checks that
+// domainToReverseLabels does.
+func domainNameValid(s string, constraint bool) bool {
+	// TODO(#75835): This function omits a number of checks that we
+	// really should be doing to enforce that domain names are valid names per
+	// RFC 1034. We previously enabled these checks, but this broke a
+	// significant number of certificates that we previously considered valid,
+	// and that we happily create via CreateCertificate (et al.). We should
+	// enable these checks, but will need to gate them behind a GODEBUG.
+	//
+	// I have left the checks we previously enabled, noted with "TODO(#75835)" so
+	// that we can easily re-enable them once we unbreak everyone.
+
+	// TODO(#75835): this should only be true for constraints.
+	if len(s) == 0 {
+		return true
+	}
+
+	// Do not allow trailing period (FQDN format is not allowed in SANs or
+	// constraints).
+	if s[len(s)-1] == '.' {
+		return false
+	}
+
+	// TODO(#75835): domains must have at least one label, cannot have
+	// a leading empty label, and cannot be longer than 253 characters.
+	// if len(s) == 0 || (!constraint && s[0] == '.') || len(s) > 253 {
+	// 	return false
+	// }
+
+	lastDot := -1
+	if constraint && s[0] == '.' {
+		s = s[1:]
+	}
+
+	for i := 0; i <= len(s); i++ {
+		if i < len(s) && (s[i] < 33 || s[i] > 126) {
+			// Invalid character.
+			return false
+		}
+		if i == len(s) || s[i] == '.' {
+			labelLen := i
+			if lastDot >= 0 {
+				labelLen -= lastDot + 1
+			}
+			if labelLen == 0 {
+				return false
+			}
+			// TODO(#75835): labels cannot be longer than 63 characters.
+			// if labelLen > 63 {
+			// 	return false
+			// }
+			lastDot = i
+		}
+	}
+
+	return true
+}
diff --git a/src/crypto/x509/parser_test.go b/src/crypto/x509/parser_test.go
index 3b9d9aed826..d53b805b786 100644
--- a/src/crypto/x509/parser_test.go
+++ b/src/crypto/x509/parser_test.go
@@ -5,9 +5,13 @@ package x509
 
 import (
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
 	"encoding/asn1"
 	"encoding/pem"
 	"os"
+	"strings"
 	"testing"
 
 	cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1"
@@ -251,3 +255,106 @@ d5l1tRhScKu2NBgm74nYmJxJYgvuTA38wGhRrGU=
 		}
 	}
 }
+
+func TestDomainNameValid(t *testing.T) {
+	for _, tc := range []struct {
+		name       string
+		dnsName    string
+		constraint bool
+		valid      bool
+	}{
+		// TODO(#75835): these tests are for stricter name validation, which we
+		// had to disable. 
Once we reenable these strict checks, behind a + // GODEBUG, we should add them back in. + // {"empty name, name", "", false, false}, + // {"254 char label, name", strings.Repeat("a.a", 84) + "aaa", false, false}, + // {"254 char label, constraint", strings.Repeat("a.a", 84) + "aaa", true, false}, + // {"253 char label, name", strings.Repeat("a.a", 84) + "aa", false, false}, + // {"253 char label, constraint", strings.Repeat("a.a", 84) + "aa", true, false}, + // {"64 char single label, name", strings.Repeat("a", 64), false, false}, + // {"64 char single label, constraint", strings.Repeat("a", 64), true, false}, + // {"64 char label, name", "a." + strings.Repeat("a", 64), false, false}, + // {"64 char label, constraint", "a." + strings.Repeat("a", 64), true, false}, + + // TODO(#75835): these are the inverse of the tests above, they should be removed + // once the strict checking is enabled. + {"254 char label, name", strings.Repeat("a.a", 84) + "aaa", false, true}, + {"254 char label, constraint", strings.Repeat("a.a", 84) + "aaa", true, true}, + {"253 char label, name", strings.Repeat("a.a", 84) + "aa", false, true}, + {"253 char label, constraint", strings.Repeat("a.a", 84) + "aa", true, true}, + {"64 char single label, name", strings.Repeat("a", 64), false, true}, + {"64 char single label, constraint", strings.Repeat("a", 64), true, true}, + {"64 char label, name", "a." + strings.Repeat("a", 64), false, true}, + {"64 char label, constraint", "a." + strings.Repeat("a", 64), true, true}, + + // Check we properly enforce properties of domain names. + {"empty name, constraint", "", true, true}, + {"empty label, name", "a..a", false, false}, + {"empty label, constraint", "a..a", true, false}, + {"period, name", ".", false, false}, + {"period, constraint", ".", true, false}, // TODO(roland): not entirely clear if this is a valid constraint (require at least one label?) + {"valid, name", "a.b.c", false, true}, + {"valid, constraint", "a.b.c", true, true}, + {"leading period, name", ".a.b.c", false, false}, + {"leading period, constraint", ".a.b.c", true, true}, + {"trailing period, name", "a.", false, false}, + {"trailing period, constraint", "a.", true, false}, + {"bare label, name", "a", false, true}, + {"bare label, constraint", "a", true, true}, + {"63 char single label, name", strings.Repeat("a", 63), false, true}, + {"63 char single label, constraint", strings.Repeat("a", 63), true, true}, + {"63 char label, name", "a." + strings.Repeat("a", 63), false, true}, + {"63 char label, constraint", "a." + strings.Repeat("a", 63), true, true}, + } { + t.Run(tc.name, func(t *testing.T) { + valid := domainNameValid(tc.dnsName, tc.constraint) + if tc.valid != valid { + t.Errorf("domainNameValid(%q, %t) = %v; want %v", tc.dnsName, tc.constraint, !tc.valid, tc.valid) + } + // Also check that we enforce the same properties as domainToReverseLabels + trimmedName := tc.dnsName + if tc.constraint && len(trimmedName) > 1 && trimmedName[0] == '.' { + trimmedName = trimmedName[1:] + } + _, revValid := domainToReverseLabels(trimmedName) + if valid != revValid { + t.Errorf("domainNameValid(%q, %t) = %t != domainToReverseLabels(%q) = %t", tc.dnsName, tc.constraint, valid, trimmedName, revValid) + } + }) + } +} + +func TestRoundtripWeirdSANs(t *testing.T) { + // TODO(#75835): check that certificates we create with CreateCertificate that have malformed SAN values + // can be parsed by ParseCertificate. 
We should eventually restrict this, but for now we have to maintain + // this property as people have been relying on it. + k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + badNames := []string{ + "baredomain", + "baredomain.", + strings.Repeat("a", 255), + strings.Repeat("a", 65) + ".com", + } + tmpl := &Certificate{ + EmailAddresses: badNames, + DNSNames: badNames, + } + b, err := CreateCertificate(rand.Reader, tmpl, tmpl, &k.PublicKey, k) + if err != nil { + t.Fatal(err) + } + _, err = ParseCertificate(b) + if err != nil { + t.Fatalf("Couldn't roundtrip certificate: %v", err) + } +} + +func FuzzDomainNameValid(f *testing.F) { + f.Fuzz(func(t *testing.T, data string) { + domainNameValid(data, false) + domainNameValid(data, true) + }) +} diff --git a/src/crypto/x509/platform_test.go b/src/crypto/x509/platform_test.go index b425e02f3bc..44ceff43f45 100644 --- a/src/crypto/x509/platform_test.go +++ b/src/crypto/x509/platform_test.go @@ -202,7 +202,6 @@ func TestPlatformVerifier(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() parent := testRoot diff --git a/src/crypto/x509/verify.go b/src/crypto/x509/verify.go index 7cc0fb2e3e0..12e59335b2d 100644 --- a/src/crypto/x509/verify.go +++ b/src/crypto/x509/verify.go @@ -17,6 +17,7 @@ import ( "net/url" "reflect" "runtime" + "slices" "strings" "time" "unicode/utf8" @@ -391,6 +392,7 @@ func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) { // domainToReverseLabels converts a textual domain name like foo.example.com to // the list of labels in reverse order, e.g. ["com", "example", "foo"]. func domainToReverseLabels(domain string) (reverseLabels []string, ok bool) { + reverseLabels = make([]string, 0, strings.Count(domain, ".")+1) for len(domain) > 0 { if i := strings.LastIndexByte(domain, '.'); i == -1 { reverseLabels = append(reverseLabels, domain) @@ -428,7 +430,7 @@ func domainToReverseLabels(domain string) (reverseLabels []string, ok bool) { return reverseLabels, true } -func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string) (bool, error) { +func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string, reversedDomainsCache map[string][]string, reversedConstraintsCache map[string][]string) (bool, error) { // If the constraint contains an @, then it specifies an exact mailbox // name. if strings.Contains(constraint, "@") { @@ -441,10 +443,10 @@ func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string) (bool, erro // Otherwise the constraint is like a DNS constraint of the domain part // of the mailbox. 
- return matchDomainConstraint(mailbox.domain, constraint) + return matchDomainConstraint(mailbox.domain, constraint, reversedDomainsCache, reversedConstraintsCache) } -func matchURIConstraint(uri *url.URL, constraint string) (bool, error) { +func matchURIConstraint(uri *url.URL, constraint string, reversedDomainsCache map[string][]string, reversedConstraintsCache map[string][]string) (bool, error) { // From RFC 5280, Section 4.2.1.10: // “a uniformResourceIdentifier that does not include an authority // component with a host name specified as a fully qualified domain @@ -473,7 +475,7 @@ func matchURIConstraint(uri *url.URL, constraint string) (bool, error) { return false, fmt.Errorf("URI with IP (%q) cannot be matched against constraints", uri.String()) } - return matchDomainConstraint(host, constraint) + return matchDomainConstraint(host, constraint, reversedDomainsCache, reversedConstraintsCache) } func matchIPConstraint(ip net.IP, constraint *net.IPNet) (bool, error) { @@ -490,16 +492,21 @@ func matchIPConstraint(ip net.IP, constraint *net.IPNet) (bool, error) { return true, nil } -func matchDomainConstraint(domain, constraint string) (bool, error) { +func matchDomainConstraint(domain, constraint string, reversedDomainsCache map[string][]string, reversedConstraintsCache map[string][]string) (bool, error) { // The meaning of zero length constraints is not specified, but this // code follows NSS and accepts them as matching everything. if len(constraint) == 0 { return true, nil } - domainLabels, ok := domainToReverseLabels(domain) - if !ok { - return false, fmt.Errorf("x509: internal error: cannot parse domain %q", domain) + domainLabels, found := reversedDomainsCache[domain] + if !found { + var ok bool + domainLabels, ok = domainToReverseLabels(domain) + if !ok { + return false, fmt.Errorf("x509: internal error: cannot parse domain %q", domain) + } + reversedDomainsCache[domain] = domainLabels } // RFC 5280 says that a leading period in a domain name means that at @@ -513,9 +520,14 @@ func matchDomainConstraint(domain, constraint string) (bool, error) { constraint = constraint[1:] } - constraintLabels, ok := domainToReverseLabels(constraint) - if !ok { - return false, fmt.Errorf("x509: internal error: cannot parse domain %q", constraint) + constraintLabels, found := reversedConstraintsCache[constraint] + if !found { + var ok bool + constraintLabels, ok = domainToReverseLabels(constraint) + if !ok { + return false, fmt.Errorf("x509: internal error: cannot parse domain %q", constraint) + } + reversedConstraintsCache[constraint] = constraintLabels } if len(domainLabels) < len(constraintLabels) || @@ -624,94 +636,8 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V } } - maxConstraintComparisons := opts.MaxConstraintComparisions - if maxConstraintComparisons == 0 { - maxConstraintComparisons = 250000 - } - comparisonCount := 0 - - if certType == intermediateCertificate || certType == rootCertificate { - if len(currentChain) == 0 { - return errors.New("x509: internal error: empty chain when appending CA cert") - } - } - - if (certType == intermediateCertificate || certType == rootCertificate) && - c.hasNameConstraints() { - toCheck := []*Certificate{} - for _, c := range currentChain { - if c.hasSANExtension() { - toCheck = append(toCheck, c) - } - } - for _, sanCert := range toCheck { - err := forEachSAN(sanCert.getSANExtension(), func(tag int, data []byte) error { - switch tag { - case nameTypeEmail: - name := string(data) - mailbox, ok := 
parseRFC2821Mailbox(name) - if !ok { - return fmt.Errorf("x509: cannot parse rfc822Name %q", mailbox) - } - - if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "email address", name, mailbox, - func(parsedName, constraint any) (bool, error) { - return matchEmailConstraint(parsedName.(rfc2821Mailbox), constraint.(string)) - }, c.PermittedEmailAddresses, c.ExcludedEmailAddresses); err != nil { - return err - } - - case nameTypeDNS: - name := string(data) - if _, ok := domainToReverseLabels(name); !ok { - return fmt.Errorf("x509: cannot parse dnsName %q", name) - } - - if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "DNS name", name, name, - func(parsedName, constraint any) (bool, error) { - return matchDomainConstraint(parsedName.(string), constraint.(string)) - }, c.PermittedDNSDomains, c.ExcludedDNSDomains); err != nil { - return err - } - - case nameTypeURI: - name := string(data) - uri, err := url.Parse(name) - if err != nil { - return fmt.Errorf("x509: internal error: URI SAN %q failed to parse", name) - } - - if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "URI", name, uri, - func(parsedName, constraint any) (bool, error) { - return matchURIConstraint(parsedName.(*url.URL), constraint.(string)) - }, c.PermittedURIDomains, c.ExcludedURIDomains); err != nil { - return err - } - - case nameTypeIP: - ip := net.IP(data) - if l := len(ip); l != net.IPv4len && l != net.IPv6len { - return fmt.Errorf("x509: internal error: IP SAN %x failed to parse", data) - } - - if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "IP address", ip.String(), ip, - func(parsedName, constraint any) (bool, error) { - return matchIPConstraint(parsedName.(net.IP), constraint.(*net.IPNet)) - }, c.PermittedIPRanges, c.ExcludedIPRanges); err != nil { - return err - } - - default: - // Unknown SAN types are ignored. - } - - return nil - }) - - if err != nil { - return err - } - } + if (certType == intermediateCertificate || certType == rootCertificate) && len(currentChain) == 0 { + return errors.New("x509: internal error: empty chain when appending CA cert") } // KeyUsage status flags are ignored. From Engineering Security, Peter @@ -777,7 +703,7 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V // Certificates other than c in the returned chains should not be modified. // // WARNING: this function doesn't do any revocation checking. -func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err error) { +func (c *Certificate) Verify(opts VerifyOptions) ([][]*Certificate, error) { // Platform-specific verification needs the ASN.1 contents so // this makes the behavior consistent across platforms. 
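+	// (c.Raw is only set for certificates produced by ParseCertificate, so a
+	// Certificate constructed by hand is rejected here.)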
if len(c.Raw) == 0 { @@ -819,15 +745,15 @@ func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err e } } - err = c.isValid(leafCertificate, nil, &opts) + err := c.isValid(leafCertificate, nil, &opts) if err != nil { - return + return nil, err } if len(opts.DNSName) > 0 { err = c.VerifyHostname(opts.DNSName) if err != nil { - return + return nil, err } } @@ -841,26 +767,12 @@ func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err e } } - chains = make([][]*Certificate, 0, len(candidateChains)) - - var invalidPoliciesChains int - for _, candidate := range candidateChains { - if !policiesValid(candidate, opts) { - invalidPoliciesChains++ - continue - } - chains = append(chains, candidate) - } - - if len(chains) == 0 { - return nil, CertificateInvalidError{c, NoValidChains, "all candidate chains have invalid policies"} - } - + anyKeyUsage := false for _, eku := range opts.KeyUsages { if eku == ExtKeyUsageAny { - // If any key usage is acceptable, no need to check the chain for - // key usages. - return chains, nil + // The presence of anyExtendedKeyUsage overrides any other key usage. + anyKeyUsage = true + break } } @@ -868,34 +780,48 @@ func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err e opts.KeyUsages = []ExtKeyUsage{ExtKeyUsageServerAuth} } - candidateChains = chains - chains = chains[:0] - + var invalidPoliciesChains int var incompatibleKeyUsageChains int - for _, candidate := range candidateChains { - if !checkChainForKeyUsage(candidate, opts.KeyUsages) { - incompatibleKeyUsageChains++ - continue + var constraintsHintErr error + candidateChains = slices.DeleteFunc(candidateChains, func(chain []*Certificate) bool { + if !policiesValid(chain, opts) { + invalidPoliciesChains++ + return true } - chains = append(chains, candidate) - } + // If any key usage is acceptable, no need to check the chain for + // key usages. 
+		if !anyKeyUsage && !checkChainForKeyUsage(chain, opts.KeyUsages) {
+			incompatibleKeyUsageChains++
+			return true
+		}
+		if err := checkChainConstraints(chain, opts); err != nil {
+			if constraintsHintErr == nil {
+				constraintsHintErr = err
+			}
+			return true
+		}
+		return false
+	})
 
-	if len(chains) == 0 {
+	if len(candidateChains) == 0 {
+		if constraintsHintErr != nil {
+			return nil, constraintsHintErr // Preserve previous constraint behavior
+		}
 		var details []string
 		if incompatibleKeyUsageChains > 0 {
 			if invalidPoliciesChains == 0 {
 				return nil, CertificateInvalidError{c, IncompatibleUsage, ""}
 			}
-			details = append(details, fmt.Sprintf("%d chains with incompatible key usage", incompatibleKeyUsageChains))
+			details = append(details, fmt.Sprintf("%d candidate chains with incompatible key usage", incompatibleKeyUsageChains))
 		}
 		if invalidPoliciesChains > 0 {
-			details = append(details, fmt.Sprintf("%d chains with invalid policies", invalidPoliciesChains))
+			details = append(details, fmt.Sprintf("%d candidate chains with invalid policies", invalidPoliciesChains))
 		}
 		err = CertificateInvalidError{c, NoValidChains, strings.Join(details, ", ")}
 		return nil, err
 	}
 
-	return chains, nil
+	return candidateChains, nil
 }
 
 func appendToFreshChain(chain []*Certificate, cert *Certificate) []*Certificate {
@@ -927,7 +853,10 @@ func alreadyInChain(candidate *Certificate, chain []*Certificate) bool {
 		if !bytes.Equal(candidate.RawSubject, cert.RawSubject) {
 			continue
 		}
-		if !candidate.PublicKey.(pubKeyEqual).Equal(cert.PublicKey) {
+		// We enforce the canonical encoding of SPKI (by only allowing the
+		// correct AI parameter encodings in parseCertificate), so it's safe to
+		// directly compare the raw bytes.
+		if !bytes.Equal(candidate.RawSubjectPublicKeyInfo, cert.RawSubjectPublicKeyInfo) {
 			continue
 		}
 		var certSAN *pkix.Extension
@@ -1253,6 +1182,106 @@ NextCert:
 	return true
 }
+func checkChainConstraints(chain []*Certificate, opts VerifyOptions) error {
+	maxConstraintComparisons := opts.MaxConstraintComparisions
+	if maxConstraintComparisons == 0 {
+		maxConstraintComparisons = 250000
+	}
+	comparisonCount := 0
+
+	// Each time we do constraint checking, we need to check the constraints in
+	// the current certificate against all of the names that preceded it. We
+	// reverse these names using domainToReverseLabels, which is a relatively
+	// expensive operation. Since we check each name against each constraint,
+	// this requires us to do N*C calls to domainToReverseLabels (where N is the
+	// total number of names that precede the certificate, and C is the total
+	// number of constraints in the certificate). By caching the results of
+	// calling domainToReverseLabels, we can reduce that to N+C calls at the
+	// cost of keeping all of the parsed names and constraints in memory until
+	// we return from checkChainConstraints. 
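+	//
+	// As a rough illustration (the numbers here are purely hypothetical): a
+	// chain whose certificates carry 1,000 SAN names in total, checked
+	// against a CA certificate with 100 constraints, would cost on the order
+	// of 100,000 domainToReverseLabels calls without the caches, but only
+	// about 1,100 with them.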
+ reversedDomainsCache := map[string][]string{} + reversedConstraintsCache := map[string][]string{} + + for i, c := range chain { + if !c.hasNameConstraints() { + continue + } + for _, sanCert := range chain[:i] { + if !sanCert.hasSANExtension() { + continue + } + err := forEachSAN(sanCert.getSANExtension(), func(tag int, data []byte) error { + switch tag { + case nameTypeEmail: + name := string(data) + mailbox, ok := parseRFC2821Mailbox(name) + if !ok { + return fmt.Errorf("x509: cannot parse rfc822Name %q", mailbox) + } + + if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "email address", name, mailbox, + func(parsedName, constraint any) (bool, error) { + return matchEmailConstraint(parsedName.(rfc2821Mailbox), constraint.(string), reversedDomainsCache, reversedConstraintsCache) + }, c.PermittedEmailAddresses, c.ExcludedEmailAddresses); err != nil { + return err + } + + case nameTypeDNS: + name := string(data) + if !domainNameValid(name, false) { + return fmt.Errorf("x509: cannot parse dnsName %q", name) + } + + if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "DNS name", name, name, + func(parsedName, constraint any) (bool, error) { + return matchDomainConstraint(parsedName.(string), constraint.(string), reversedDomainsCache, reversedConstraintsCache) + }, c.PermittedDNSDomains, c.ExcludedDNSDomains); err != nil { + return err + } + + case nameTypeURI: + name := string(data) + uri, err := url.Parse(name) + if err != nil { + return fmt.Errorf("x509: internal error: URI SAN %q failed to parse", name) + } + + if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "URI", name, uri, + func(parsedName, constraint any) (bool, error) { + return matchURIConstraint(parsedName.(*url.URL), constraint.(string), reversedDomainsCache, reversedConstraintsCache) + }, c.PermittedURIDomains, c.ExcludedURIDomains); err != nil { + return err + } + + case nameTypeIP: + ip := net.IP(data) + if l := len(ip); l != net.IPv4len && l != net.IPv6len { + return fmt.Errorf("x509: internal error: IP SAN %x failed to parse", data) + } + + if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "IP address", ip.String(), ip, + func(parsedName, constraint any) (bool, error) { + return matchIPConstraint(parsedName.(net.IP), constraint.(*net.IPNet)) + }, c.PermittedIPRanges, c.ExcludedIPRanges); err != nil { + return err + } + + default: + // Unknown SAN types are ignored. 
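+					// (For example, otherName, directoryName, and registeredID
+					// general names have no corresponding constraint type here.)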
+				}
+
+				return nil
+			})
+
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
 func mustNewOIDFromInts(ints []uint64) OID {
 	oid, err := OIDFromInts(ints)
 	if err != nil {
diff --git a/src/crypto/x509/verify_test.go b/src/crypto/x509/verify_test.go
index 7991f49946d..17e7aaf897a 100644
--- a/src/crypto/x509/verify_test.go
+++ b/src/crypto/x509/verify_test.go
@@ -6,6 +6,7 @@ package x509
 
 import (
 	"crypto"
+	"crypto/dsa"
 	"crypto/ecdsa"
 	"crypto/elliptic"
 	"crypto/rand"
@@ -1351,7 +1352,7 @@ var nameConstraintTests = []struct {
 
 func TestNameConstraints(t *testing.T) {
 	for i, test := range nameConstraintTests {
-		result, err := matchDomainConstraint(test.domain, test.constraint)
+		result, err := matchDomainConstraint(test.domain, test.constraint, map[string][]string{}, map[string][]string{})
 
 		if err != nil && !test.expectError {
 			t.Errorf("unexpected error for test #%d: domain=%s, constraint=%s, err=%s", i, test.domain, test.constraint, err)
@@ -3030,7 +3031,7 @@ func TestInvalidPolicyWithAnyKeyUsage(t *testing.T) {
 	testOID3 := mustNewOIDFromInts([]uint64{1, 2, 840, 113554, 4, 1, 72585, 2, 3})
 	root, intermediate, leaf := loadTestCert(t, "testdata/policy_root.pem"), loadTestCert(t, "testdata/policy_intermediate_require.pem"), loadTestCert(t, "testdata/policy_leaf.pem")
 
-	expectedErr := "x509: no valid chains built: all candidate chains have invalid policies"
+	expectedErr := "x509: no valid chains built: 1 candidate chains with invalid policies"
 
 	roots, intermediates := NewCertPool(), NewCertPool()
 	roots.AddCert(root)
@@ -3048,3 +3049,129 @@ func TestInvalidPolicyWithAnyKeyUsage(t *testing.T) {
 		t.Fatalf("unexpected error, got %q, want %q", err, expectedErr)
 	}
 }
+
+func TestCertificateChainSignedByECDSA(t *testing.T) {
+	caKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	if err != nil {
+		t.Fatal(err)
+	}
+	root := &Certificate{
+		SerialNumber:          big.NewInt(1),
+		Subject:               pkix.Name{CommonName: "X"},
+		NotBefore:             time.Now().Add(-time.Hour),
+		NotAfter:              time.Now().Add(365 * 24 * time.Hour),
+		IsCA:                  true,
+		KeyUsage:              KeyUsageCertSign | KeyUsageCRLSign,
+		BasicConstraintsValid: true,
+	}
+	caDER, err := CreateCertificate(rand.Reader, root, root, &caKey.PublicKey, caKey)
+	if err != nil {
+		t.Fatal(err)
+	}
+	root, err = ParseCertificate(caDER)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	leafKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	leaf := &Certificate{
+		SerialNumber:          big.NewInt(42),
+		Subject:               pkix.Name{CommonName: "leaf"},
+		NotBefore:             time.Now().Add(-10 * time.Minute),
+		NotAfter:              time.Now().Add(24 * time.Hour),
+		KeyUsage:              KeyUsageDigitalSignature,
+		ExtKeyUsage:           []ExtKeyUsage{ExtKeyUsageServerAuth},
+		BasicConstraintsValid: true,
+	}
+	leafDER, err := CreateCertificate(rand.Reader, leaf, root, &leafKey.PublicKey, caKey)
+	if err != nil {
+		t.Fatal(err)
+	}
+	leaf, err = ParseCertificate(leafDER)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	inter, err := ParseCertificate(dsaSelfSignedCNX(t))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	inters := NewCertPool()
+	inters.AddCert(root)
+	inters.AddCert(inter)
+
+	wantErr := "certificate signed by unknown authority"
+	_, err = leaf.Verify(VerifyOptions{Intermediates: inters, Roots: NewCertPool()})
+	if !strings.Contains(err.Error(), wantErr) {
+		t.Errorf("got %v, want %q", err, wantErr)
+	}
+}
+
+// dsaSelfSignedCNX produces a DER-encoded
+// certificate with the following properties:
+//
+//	Subject=Issuer=CN=X
+//	DSA SPKI
+//	Matching inner/outer signature OIDs
+//	Dummy (placeholder) signature value
+func dsaSelfSignedCNX(t *testing.T) []byte {
+	
t.Helper() + var params dsa.Parameters + if err := dsa.GenerateParameters(¶ms, rand.Reader, dsa.L1024N160); err != nil { + t.Fatal(err) + } + + var dsaPriv dsa.PrivateKey + dsaPriv.Parameters = params + if err := dsa.GenerateKey(&dsaPriv, rand.Reader); err != nil { + t.Fatal(err) + } + dsaPub := &dsaPriv.PublicKey + + type dsaParams struct{ P, Q, G *big.Int } + paramDER, err := asn1.Marshal(dsaParams{dsaPub.P, dsaPub.Q, dsaPub.G}) + if err != nil { + t.Fatal(err) + } + yDER, err := asn1.Marshal(dsaPub.Y) + if err != nil { + t.Fatal(err) + } + + spki := publicKeyInfo{ + Algorithm: pkix.AlgorithmIdentifier{ + Algorithm: oidPublicKeyDSA, + Parameters: asn1.RawValue{FullBytes: paramDER}, + }, + PublicKey: asn1.BitString{Bytes: yDER, BitLength: 8 * len(yDER)}, + } + + rdn := pkix.Name{CommonName: "X"}.ToRDNSequence() + b, err := asn1.Marshal(rdn) + if err != nil { + t.Fatal(err) + } + rawName := asn1.RawValue{FullBytes: b} + + algoIdent := pkix.AlgorithmIdentifier{Algorithm: oidSignatureDSAWithSHA256} + tbs := tbsCertificate{ + Version: 0, + SerialNumber: big.NewInt(1002), + SignatureAlgorithm: algoIdent, + Issuer: rawName, + Validity: validity{NotBefore: time.Now().Add(-time.Hour), NotAfter: time.Now().Add(24 * time.Hour)}, + Subject: rawName, + PublicKey: spki, + } + c := certificate{ + TBSCertificate: tbs, + SignatureAlgorithm: algoIdent, + SignatureValue: asn1.BitString{Bytes: []byte{0}, BitLength: 8}, + } + dsaDER, err := asn1.Marshal(c) + if err != nil { + t.Fatal(err) + } + return dsaDER +} diff --git a/src/database/sql/sql_test.go b/src/database/sql/sql_test.go index f706610b87e..c3f228ef0ba 100644 --- a/src/database/sql/sql_test.go +++ b/src/database/sql/sql_test.go @@ -2939,7 +2939,6 @@ func TestConnExpiresFreshOutOfPool(t *testing.T) { db.SetMaxOpenConns(1) for _, ec := range execCases { - ec := ec name := fmt.Sprintf("expired=%t,badReset=%t", ec.expired, ec.badReset) t.Run(name, func(t *testing.T) { db.clearAllConns(t) diff --git a/src/debug/buildinfo/buildinfo.go b/src/debug/buildinfo/buildinfo.go index 12e3b750d23..d202d5050a2 100644 --- a/src/debug/buildinfo/buildinfo.go +++ b/src/debug/buildinfo/buildinfo.go @@ -67,7 +67,7 @@ const ( // with module support. 
func ReadFile(name string) (info *BuildInfo, err error) { defer func() { - if pathErr := (*fs.PathError)(nil); errors.As(err, &pathErr) { + if _, ok := errors.AsType[*fs.PathError](err); ok { err = fmt.Errorf("could not read Go build info: %w", err) } else if err != nil { err = fmt.Errorf("could not read Go build info from %s: %w", name, err) diff --git a/src/debug/buildinfo/buildinfo_test.go b/src/debug/buildinfo/buildinfo_test.go index 1c22f1ccdb7..ceab14e8bff 100644 --- a/src/debug/buildinfo/buildinfo_test.go +++ b/src/debug/buildinfo/buildinfo_test.go @@ -238,16 +238,13 @@ func TestReadFile(t *testing.T) { } for _, p := range platforms { - p := p t.Run(p.goos+"_"+p.goarch, func(t *testing.T) { if p != runtimePlatform && !*flagAll { t.Skipf("skipping platforms other than %s_%s because -all was not set", runtimePlatform.goos, runtimePlatform.goarch) } for _, mode := range buildModes { - mode := mode t.Run(mode, func(t *testing.T) { for _, tc := range cases { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() name := tc.build(t, p.goos, p.goarch, mode) diff --git a/src/debug/dwarf/entry.go b/src/debug/dwarf/entry.go index 87414794833..30fad93e793 100644 --- a/src/debug/dwarf/entry.go +++ b/src/debug/dwarf/entry.go @@ -229,7 +229,7 @@ func formToClass(form format, attr Attr, vers int, b *buf) Class { } } -// An entry is a sequence of attribute/value pairs. +// An Entry is a sequence of attribute/value pairs. type Entry struct { Offset Offset // offset of Entry in DWARF info Tag Tag // tag (kind of Entry) diff --git a/src/debug/elf/file_test.go b/src/debug/elf/file_test.go index 0c1a7cf18ae..b796cdb95b6 100644 --- a/src/debug/elf/file_test.go +++ b/src/debug/elf/file_test.go @@ -1040,7 +1040,6 @@ var relocationTests = []relocationTest{ func TestDWARFRelocations(t *testing.T) { for _, test := range relocationTests { - test := test t.Run(test.file, func(t *testing.T) { t.Parallel() f, err := Open(test.file) diff --git a/src/debug/gosym/symtab.go b/src/debug/gosym/symtab.go index bf38927254f..08d46684bf3 100644 --- a/src/debug/gosym/symtab.go +++ b/src/debug/gosym/symtab.go @@ -332,7 +332,8 @@ func walksymtab(data []byte, fn func(sym) error) error { // NewTable decodes the Go symbol table (the ".gosymtab" section in ELF), // returning an in-memory representation. -// Starting with Go 1.3, the Go symbol table no longer includes symbol data. +// Starting with Go 1.3, the Go symbol table no longer includes symbol data; +// callers should pass nil for the symtab parameter. 
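+// For binaries built with Go 1.3 or later, a table is therefore typically
+// constructed as NewTable(nil, pcln).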
 func NewTable(symtab []byte, pcln *LineTable) (*Table, error) {
 	var n int
 	err := walksymtab(symtab, func(s sym) error {
diff --git a/src/debug/pe/file_test.go b/src/debug/pe/file_test.go
index 3d960ab7f36..acea0455d88 100644
--- a/src/debug/pe/file_test.go
+++ b/src/debug/pe/file_test.go
@@ -413,7 +413,7 @@ func testDWARF(t *testing.T, linktype int) {
 	var foundDebugGDBScriptsSection bool
 	for _, sect := range f.Sections {
-		if sect.Name == ".debug_gdb_scripts" {
+		if sect.Name == ".debug_gdb_scripts" || sect.Name == ".zdebug_gdb_scripts" {
 			foundDebugGDBScriptsSection = true
 		}
 	}
diff --git a/src/encoding/asn1/asn1.go b/src/encoding/asn1/asn1.go
index 0b64f06d368..f4be515b98e 100644
--- a/src/encoding/asn1/asn1.go
+++ b/src/encoding/asn1/asn1.go
@@ -22,6 +22,7 @@ package asn1
 import (
 	"errors"
 	"fmt"
+	"internal/saferio"
 	"math"
 	"math/big"
 	"reflect"
@@ -666,10 +667,17 @@ func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type
 		offset += t.length
 		numElements++
 	}
-	ret = reflect.MakeSlice(sliceType, numElements, numElements)
+	elemSize := uint64(elemType.Size())
+	safeCap := saferio.SliceCapWithSize(elemSize, uint64(numElements))
+	if safeCap < 0 {
+		err = SyntaxError{fmt.Sprintf("%s slice too big: %d elements of %d bytes", elemType.Kind(), numElements, elemSize)}
+		return
+	}
+	ret = reflect.MakeSlice(sliceType, 0, safeCap)
 	params := fieldParameters{}
 	offset := 0
 	for i := 0; i < numElements; i++ {
+		ret = reflect.Append(ret, reflect.Zero(elemType))
 		offset, err = parseField(ret.Index(i), bytes, offset, params)
 		if err != nil {
 			return
diff --git a/src/encoding/asn1/asn1_test.go b/src/encoding/asn1/asn1_test.go
index 0597740bd5e..41cc0ba50ec 100644
--- a/src/encoding/asn1/asn1_test.go
+++ b/src/encoding/asn1/asn1_test.go
@@ -7,10 +7,12 @@ package asn1
 import (
 	"bytes"
 	"encoding/hex"
+	"errors"
 	"fmt"
 	"math"
 	"math/big"
 	"reflect"
+	"runtime"
 	"strings"
 	"testing"
 	"time"
@@ -1216,3 +1218,39 @@ func TestImplicitTypeRoundtrip(t *testing.T) {
 		t.Fatalf("Unexpected diff after roundtripping struct\na: %#v\nb: %#v", a, b)
 	}
 }
+
+func TestParsingMemoryConsumption(t *testing.T) {
+	// Craft a syntactically valid, but empty, ~10 MB DER bomb. A successful
+	// unmarshal of this bomb should yield ~280 MB. However, the parsing should
+	// fail due to the empty content, and in such cases we want to make sure
+	// that we do not unnecessarily allocate memory.
+	derBomb := make([]byte, 10_000_000)
+	for i := range derBomb {
+		derBomb[i] = 0x30
+	}
+	derBomb = append([]byte{0x30, 0x83, 0x98, 0x96, 0x80}, derBomb...)
+
+	var m runtime.MemStats
+	runtime.GC()
+	runtime.ReadMemStats(&m)
+	memBefore := m.TotalAlloc
+
+	var out []struct {
+		Id       []int
+		Critical bool `asn1:"optional"`
+		Value    []byte
+	}
+	_, err := Unmarshal(derBomb, &out)
+	if !errors.As(err, &SyntaxError{}) {
+		t.Fatalf("Incorrect error result: want (%v), but got (%v) instead", &SyntaxError{}, err)
+	}
+
+	runtime.ReadMemStats(&m)
+	memDiff := m.TotalAlloc - memBefore
+
+	// Ensure that the memory allocated does not exceed 10<<21 (~20 MB) when
+	// the parsing fails. 
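+	// (10<<21 = 10 * 2^21 = 20,971,520 bytes, i.e. exactly 20 MiB.)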
+ if memDiff > 10<<21 { + t.Errorf("Too much memory allocated while parsing DER: %v MiB", memDiff/1024/1024) + } +} diff --git a/src/encoding/asn1/marshal.go b/src/encoding/asn1/marshal.go index 70e4fafc123..ddb3bd85dd6 100644 --- a/src/encoding/asn1/marshal.go +++ b/src/encoding/asn1/marshal.go @@ -460,17 +460,20 @@ func makeBody(value reflect.Value, params fieldParameters) (e encoder, err error case flagType: return bytesEncoder(nil), nil case timeType: - t := value.Interface().(time.Time) + t, _ := reflect.TypeAssert[time.Time](value) if params.timeType == TagGeneralizedTime || outsideUTCRange(t) { return makeGeneralizedTime(t) } return makeUTCTime(t) case bitStringType: - return bitStringEncoder(value.Interface().(BitString)), nil + v, _ := reflect.TypeAssert[BitString](value) + return bitStringEncoder(v), nil case objectIdentifierType: - return makeObjectIdentifier(value.Interface().(ObjectIdentifier)) + v, _ := reflect.TypeAssert[ObjectIdentifier](value) + return makeObjectIdentifier(v) case bigIntType: - return makeBigInt(value.Interface().(*big.Int)) + v, _ := reflect.TypeAssert[*big.Int](value) + return makeBigInt(v) } switch v := value; v.Kind() { @@ -605,7 +608,7 @@ func makeField(v reflect.Value, params fieldParameters) (e encoder, err error) { } if v.Type() == rawValueType { - rv := v.Interface().(RawValue) + rv, _ := reflect.TypeAssert[RawValue](v) if len(rv.FullBytes) != 0 { return bytesEncoder(rv.FullBytes), nil } @@ -650,7 +653,8 @@ func makeField(v reflect.Value, params fieldParameters) (e encoder, err error) { tag = params.stringType } case TagUTCTime: - if params.timeType == TagGeneralizedTime || outsideUTCRange(v.Interface().(time.Time)) { + t, _ := reflect.TypeAssert[time.Time](v) + if params.timeType == TagGeneralizedTime || outsideUTCRange(t) { tag = TagGeneralizedTime } } diff --git a/src/encoding/base32/base32_test.go b/src/encoding/base32/base32_test.go index f5d3c49e38f..6f8d564def3 100644 --- a/src/encoding/base32/base32_test.go +++ b/src/encoding/base32/base32_test.go @@ -709,7 +709,6 @@ func TestBufferedDecodingPadding(t *testing.T) { } for _, testcase := range testcases { - testcase := testcase pr, pw := io.Pipe() go func() { for _, chunk := range testcase.chunks { diff --git a/src/encoding/binary/varint_test.go b/src/encoding/binary/varint_test.go index 5c3ea318c39..dbb615070bf 100644 --- a/src/encoding/binary/varint_test.go +++ b/src/encoding/binary/varint_test.go @@ -181,7 +181,6 @@ func TestBufferTooBigWithOverflow(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { value, n := Uvarint(tt.in) if g, w := n, tt.wantN; g != w { diff --git a/src/encoding/gob/encoder_test.go b/src/encoding/gob/encoder_test.go index 3ee43fbc94b..3810685cbd1 100644 --- a/src/encoding/gob/encoder_test.go +++ b/src/encoding/gob/encoder_test.go @@ -1274,7 +1274,7 @@ func TestDecoderOverflow(t *testing.T) { 0x12, 0xff, 0xff, 0x2, 0x2, 0x20, 0x0, 0xf8, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x20, 0x20, 0x20, 0x20, 0x20, })) - var r interface{} + var r any err := dec.Decode(r) if err == nil { t.Fatalf("expected an error") diff --git a/src/encoding/json/decode_test.go b/src/encoding/json/decode_test.go index d12495f90b7..0b26b8eb918 100644 --- a/src/encoding/json/decode_test.go +++ b/src/encoding/json/decode_test.go @@ -1237,6 +1237,62 @@ var unmarshalTests = []struct { out: (chan int)(nil), err: &UnmarshalTypeError{Value: "number", Type: reflect.TypeFor[chan int](), Offset: 1}, }, + + // #75619 + { + CaseName: 
Name("QuotedInt/GoSyntax"), + in: `{"X": "-0000123"}`, + ptr: new(struct { + X int64 `json:",string"` + }), + out: struct { + X int64 `json:",string"` + }{-123}, + }, + { + CaseName: Name("QuotedInt/Invalid"), + in: `{"X": "123 "}`, + ptr: new(struct { + X int64 `json:",string"` + }), + err: &UnmarshalTypeError{Value: "number 123 ", Type: reflect.TypeFor[int64](), Field: "X", Offset: int64(len(`{"X": "123 "`))}, + }, + { + CaseName: Name("QuotedUint/GoSyntax"), + in: `{"X": "0000123"}`, + ptr: new(struct { + X uint64 `json:",string"` + }), + out: struct { + X uint64 `json:",string"` + }{123}, + }, + { + CaseName: Name("QuotedUint/Invalid"), + in: `{"X": "0x123"}`, + ptr: new(struct { + X uint64 `json:",string"` + }), + err: &UnmarshalTypeError{Value: "number 0x123", Type: reflect.TypeFor[uint64](), Field: "X", Offset: int64(len(`{"X": "0x123"`))}, + }, + { + CaseName: Name("QuotedFloat/GoSyntax"), + in: `{"X": "0x1_4p-2"}`, + ptr: new(struct { + X float64 `json:",string"` + }), + out: struct { + X float64 `json:",string"` + }{0x1_4p-2}, + }, + { + CaseName: Name("QuotedFloat/Invalid"), + in: `{"X": "1.5e1_"}`, + ptr: new(struct { + X float64 `json:",string"` + }), + err: &UnmarshalTypeError{Value: "number 1.5e1_", Type: reflect.TypeFor[float64](), Field: "X", Offset: int64(len(`{"X": "1.5e1_"`))}, + }, } func TestMarshal(t *testing.T) { diff --git a/src/encoding/json/internal/jsonopts/options.go b/src/encoding/json/internal/jsonopts/options.go index e4c3f47d36a..39da81b3454 100644 --- a/src/encoding/json/internal/jsonopts/options.go +++ b/src/encoding/json/internal/jsonopts/options.go @@ -48,16 +48,16 @@ type ArshalValues struct { // DefaultOptionsV2 is the set of all options that define default v2 behavior. var DefaultOptionsV2 = Struct{ Flags: jsonflags.Flags{ - Presence: uint64(jsonflags.AllFlags & ^jsonflags.WhitespaceFlags), - Values: uint64(0), + Presence: uint64(jsonflags.DefaultV1Flags), + Values: uint64(0), // all flags in DefaultV1Flags are false }, } // DefaultOptionsV1 is the set of all options that define default v1 behavior. var DefaultOptionsV1 = Struct{ Flags: jsonflags.Flags{ - Presence: uint64(jsonflags.AllFlags & ^jsonflags.WhitespaceFlags), - Values: uint64(jsonflags.DefaultV1Flags), + Presence: uint64(jsonflags.DefaultV1Flags), + Values: uint64(jsonflags.DefaultV1Flags), // all flags in DefaultV1Flags are true }, } diff --git a/src/encoding/json/internal/jsonopts/options_test.go b/src/encoding/json/internal/jsonopts/options_test.go index ebfaf05c833..caa686e4f0d 100644 --- a/src/encoding/json/internal/jsonopts/options_test.go +++ b/src/encoding/json/internal/jsonopts/options_test.go @@ -200,6 +200,9 @@ func TestGet(t *testing.T) { if v, ok := json.GetOption(opts, json.WithUnmarshalers); v != nil || ok { t.Errorf(`GetOption(..., WithUnmarshalers) = (%v, %v), want (nil, false)`, v, ok) } + if v, ok := json.GetOption(json.DefaultOptionsV2(), json.WithMarshalers); v != nil || ok { + t.Errorf(`GetOption(..., WithMarshalers) = (%v, %v), want (nil, false)`, v, ok) + } } var sink struct { diff --git a/src/encoding/json/jsontext/coder_test.go b/src/encoding/json/jsontext/coder_test.go index 4a9efb3b8f9..8602e3e7fff 100644 --- a/src/encoding/json/jsontext/coder_test.go +++ b/src/encoding/json/jsontext/coder_test.go @@ -486,7 +486,7 @@ func testCoderInterleaved(t *testing.T, where jsontest.CasePos, modeName string, // Retry as a ReadToken call. 
expectError := dec.PeekKind() == '}' || dec.PeekKind() == ']' if expectError { - if !errors.As(err, new(*SyntacticError)) { + if _, ok := errors.AsType[*SyntacticError](err); !ok { t.Fatalf("%s: Decoder.ReadToken error is %T, want %T", where, err, new(SyntacticError)) } tickTock = !tickTock diff --git a/src/encoding/json/jsontext/decode.go b/src/encoding/json/jsontext/decode.go index f505de44684..511832f2ae0 100644 --- a/src/encoding/json/jsontext/decode.go +++ b/src/encoding/json/jsontext/decode.go @@ -792,6 +792,12 @@ func (d *decoderState) CheckNextValue(last bool) error { return nil } +// AtEOF reports whether the decoder is at EOF. +func (d *decoderState) AtEOF() bool { + _, err := d.consumeWhitespace(d.prevEnd) + return err == io.ErrUnexpectedEOF +} + // CheckEOF verifies that the input has no more data. func (d *decoderState) CheckEOF() error { return d.checkEOF(d.prevEnd) diff --git a/src/encoding/json/jsontext/fuzz_test.go b/src/encoding/json/jsontext/fuzz_test.go index 60d16b9e278..3ad181d4341 100644 --- a/src/encoding/json/jsontext/fuzz_test.go +++ b/src/encoding/json/jsontext/fuzz_test.go @@ -53,9 +53,10 @@ func FuzzCoder(f *testing.F) { } else { val, err := dec.ReadValue() if err != nil { - expectError := dec.PeekKind() == '}' || dec.PeekKind() == ']' - if expectError && errors.As(err, new(*SyntacticError)) { - continue + if expectError := dec.PeekKind() == '}' || dec.PeekKind() == ']'; expectError { + if _, ok := errors.AsType[*SyntacticError](err); ok { + continue + } } if err == io.EOF { break diff --git a/src/encoding/json/jsontext/pools.go b/src/encoding/json/jsontext/pools.go index 4f9e0ea410c..3066ab4a1d2 100644 --- a/src/encoding/json/jsontext/pools.go +++ b/src/encoding/json/jsontext/pools.go @@ -54,6 +54,10 @@ func getBufferedEncoder(opts ...Options) *Encoder { return e } func putBufferedEncoder(e *Encoder) { + if cap(e.s.availBuffer) > 64<<10 { + e.s.availBuffer = nil // avoid pinning arbitrarily large amounts of memory + } + // Recycle large buffers only if sufficiently utilized. 
// If a buffer is under-utilized enough times sequentially, // then it is discarded, ensuring that a single large buffer @@ -95,9 +99,14 @@ func getStreamingEncoder(w io.Writer, opts ...Options) *Encoder { } } func putStreamingEncoder(e *Encoder) { + if cap(e.s.availBuffer) > 64<<10 { + e.s.availBuffer = nil // avoid pinning arbitrarily large amounts of memory + } if _, ok := e.s.wr.(*bytes.Buffer); ok { + e.s.wr, e.s.Buf = nil, nil // avoid pinning the provided bytes.Buffer bytesBufferEncoderPool.Put(e) } else { + e.s.wr = nil // avoid pinning the provided io.Writer if cap(e.s.Buf) > 64<<10 { e.s.Buf = nil // avoid pinning arbitrarily large amounts of memory } @@ -126,6 +135,7 @@ func getBufferedDecoder(b []byte, opts ...Options) *Decoder { return d } func putBufferedDecoder(d *Decoder) { + d.s.buf = nil // avoid pinning the provided buffer bufferedDecoderPool.Put(d) } @@ -142,8 +152,10 @@ func getStreamingDecoder(r io.Reader, opts ...Options) *Decoder { } func putStreamingDecoder(d *Decoder) { if _, ok := d.s.rd.(*bytes.Buffer); ok { + d.s.rd, d.s.buf = nil, nil // avoid pinning the provided bytes.Buffer bytesBufferDecoderPool.Put(d) } else { + d.s.rd = nil // avoid pinning the provided io.Reader if cap(d.s.buf) > 64<<10 { d.s.buf = nil // avoid pinning arbitrarily large amounts of memory } diff --git a/src/encoding/json/jsontext/state.go b/src/encoding/json/jsontext/state.go index d214fd51903..e93057a34c5 100644 --- a/src/encoding/json/jsontext/state.go +++ b/src/encoding/json/jsontext/state.go @@ -24,8 +24,8 @@ import ( // The name of a duplicate JSON object member can be extracted as: // // err := ... -// var serr jsontext.SyntacticError -// if errors.As(err, &serr) && serr.Err == jsontext.ErrDuplicateName { +// serr, ok := errors.AsType[*jsontext.SyntacticError](err) +// if ok && serr.Err == jsontext.ErrDuplicateName { // ptr := serr.JSONPointer // JSON pointer to duplicate name // name := ptr.LastToken() // duplicate name itself // ... diff --git a/src/encoding/json/v2/arshal.go b/src/encoding/json/v2/arshal.go index 6b4bcb0c74c..5537a467d83 100644 --- a/src/encoding/json/v2/arshal.go +++ b/src/encoding/json/v2/arshal.go @@ -11,8 +11,6 @@ import ( "encoding" "io" "reflect" - "slices" - "strings" "sync" "time" @@ -440,8 +438,9 @@ func UnmarshalRead(in io.Reader, out any, opts ...Options) (err error) { // Unlike [Unmarshal] and [UnmarshalRead], decode options are ignored because // they must have already been specified on the provided [jsontext.Decoder]. // -// The input may be a stream of one or more JSON values, +// The input may be a stream of zero or more JSON values, // where this only unmarshals the next JSON value in the stream. +// If there are no more top-level JSON values, it reports [io.EOF]. // The output must be a non-nil pointer. // See [Unmarshal] for details about the conversion of JSON into a Go value. 
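+//
+// For example, a stream of top-level JSON values can be drained by calling
+// UnmarshalDecode until it reports [io.EOF] (a sketch; r is any [io.Reader]):
+//
+//	dec := jsontext.NewDecoder(r)
+//	for {
+//		var v any
+//		if err := UnmarshalDecode(dec, &v); err == io.EOF {
+//			break
+//		} else if err != nil {
+//			return err
+//		}
+//		// ... use v ...
+//	}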
func UnmarshalDecode(in *jsontext.Decoder, out any, opts ...Options) (err error) { @@ -572,9 +571,6 @@ func putStrings(s *stringSlice) { if cap(*s) > 1<<10 { *s = nil // avoid pinning arbitrarily large amounts of memory } + clear(*s) // avoid pinning a reference to each string stringsPools.Put(s) } - -func (ss *stringSlice) Sort() { - slices.SortFunc(*ss, func(x, y string) int { return strings.Compare(x, y) }) -} diff --git a/src/encoding/json/v2/arshal_any.go b/src/encoding/json/v2/arshal_any.go index 97a77e92376..8c0c445404d 100644 --- a/src/encoding/json/v2/arshal_any.go +++ b/src/encoding/json/v2/arshal_any.go @@ -10,6 +10,7 @@ import ( "cmp" "math" "reflect" + "slices" "strconv" "encoding/json/internal" @@ -153,7 +154,7 @@ func marshalObjectAny(enc *jsontext.Encoder, obj map[string]any, mo *jsonopts.St (*names)[i] = name i++ } - names.Sort() + slices.Sort(*names) for _, name := range *names { if err := enc.WriteToken(jsontext.String(name)); err != nil { return err diff --git a/src/encoding/json/v2/arshal_default.go b/src/encoding/json/v2/arshal_default.go index c2307fa31d7..33931af17e6 100644 --- a/src/encoding/json/v2/arshal_default.go +++ b/src/encoding/json/v2/arshal_default.go @@ -474,10 +474,21 @@ func makeIntArshaler(t reflect.Type) *arshaler { break } val = jsonwire.UnquoteMayCopy(val, flags.IsVerbatim()) - if uo.Flags.Get(jsonflags.StringifyWithLegacySemantics) && string(val) == "null" { - if !uo.Flags.Get(jsonflags.MergeWithLegacySemantics) { - va.SetInt(0) + if uo.Flags.Get(jsonflags.StringifyWithLegacySemantics) { + // For historical reasons, v1 parsed a quoted number + // according to the Go syntax and permitted a quoted null. + // See https://go.dev/issue/75619 + n, err := strconv.ParseInt(string(val), 10, bits) + if err != nil { + if string(val) == "null" { + if !uo.Flags.Get(jsonflags.MergeWithLegacySemantics) { + va.SetInt(0) + } + return nil + } + return newUnmarshalErrorAfterWithValue(dec, t, errors.Unwrap(err)) } + va.SetInt(n) return nil } fallthrough @@ -561,10 +572,21 @@ func makeUintArshaler(t reflect.Type) *arshaler { break } val = jsonwire.UnquoteMayCopy(val, flags.IsVerbatim()) - if uo.Flags.Get(jsonflags.StringifyWithLegacySemantics) && string(val) == "null" { - if !uo.Flags.Get(jsonflags.MergeWithLegacySemantics) { - va.SetUint(0) + if uo.Flags.Get(jsonflags.StringifyWithLegacySemantics) { + // For historical reasons, v1 parsed a quoted number + // according to the Go syntax and permitted a quoted null. + // See https://go.dev/issue/75619 + n, err := strconv.ParseUint(string(val), 10, bits) + if err != nil { + if string(val) == "null" { + if !uo.Flags.Get(jsonflags.MergeWithLegacySemantics) { + va.SetUint(0) + } + return nil + } + return newUnmarshalErrorAfterWithValue(dec, t, errors.Unwrap(err)) } + va.SetUint(n) return nil } fallthrough @@ -671,10 +693,21 @@ func makeFloatArshaler(t reflect.Type) *arshaler { if !stringify { break } - if uo.Flags.Get(jsonflags.StringifyWithLegacySemantics) && string(val) == "null" { - if !uo.Flags.Get(jsonflags.MergeWithLegacySemantics) { - va.SetFloat(0) + if uo.Flags.Get(jsonflags.StringifyWithLegacySemantics) { + // For historical reasons, v1 parsed a quoted number + // according to the Go syntax and permitted a quoted null. 
+ // See https://go.dev/issue/75619 + n, err := strconv.ParseFloat(string(val), bits) + if err != nil { + if string(val) == "null" { + if !uo.Flags.Get(jsonflags.MergeWithLegacySemantics) { + va.SetFloat(0) + } + return nil + } + return newUnmarshalErrorAfterWithValue(dec, t, errors.Unwrap(err)) } + va.SetFloat(n) return nil } if n, err := jsonwire.ConsumeNumber(val); n != len(val) || err != nil { @@ -810,7 +843,7 @@ func makeMapArshaler(t reflect.Type) *arshaler { k.SetIterKey(iter) (*names)[i] = k.String() } - names.Sort() + slices.Sort(*names) for _, name := range *names { if err := enc.WriteToken(jsontext.String(name)); err != nil { return err diff --git a/src/encoding/json/v2/arshal_funcs.go b/src/encoding/json/v2/arshal_funcs.go index 673caf3c376..28916af948d 100644 --- a/src/encoding/json/v2/arshal_funcs.go +++ b/src/encoding/json/v2/arshal_funcs.go @@ -9,6 +9,7 @@ package json import ( "errors" "fmt" + "io" "reflect" "sync" @@ -306,6 +307,9 @@ func UnmarshalFromFunc[T any](fn func(*jsontext.Decoder, T) error) *Unmarshalers fnc: func(dec *jsontext.Decoder, va addressableValue, uo *jsonopts.Struct) error { xd := export.Decoder(dec) prevDepth, prevLength := xd.Tokens.DepthLength() + if prevDepth == 1 && xd.AtEOF() { + return io.EOF // check EOF early to avoid fn reporting an EOF + } xd.Flags.Set(jsonflags.WithinArshalCall | 1) v, _ := reflect.TypeAssert[T](va.castTo(t)) err := fn(dec, v) diff --git a/src/encoding/json/v2/arshal_inlined.go b/src/encoding/json/v2/arshal_inlined.go index d911bfa1c04..03e563a0c09 100644 --- a/src/encoding/json/v2/arshal_inlined.go +++ b/src/encoding/json/v2/arshal_inlined.go @@ -11,6 +11,7 @@ import ( "errors" "io" "reflect" + "slices" "encoding/json/internal/jsonflags" "encoding/json/internal/jsonopts" @@ -146,7 +147,7 @@ func marshalInlinedFallbackAll(enc *jsontext.Encoder, va addressableValue, mo *j mk.SetIterKey(iter) (*names)[i] = mk.String() } - names.Sort() + slices.Sort(*names) for _, name := range *names { mk.SetString(name) if err := marshalKey(mk); err != nil { diff --git a/src/encoding/json/v2/arshal_methods.go b/src/encoding/json/v2/arshal_methods.go index 2decd144dbe..1621eadc080 100644 --- a/src/encoding/json/v2/arshal_methods.go +++ b/src/encoding/json/v2/arshal_methods.go @@ -9,6 +9,7 @@ package json import ( "encoding" "errors" + "io" "reflect" "encoding/json/internal" @@ -302,6 +303,9 @@ func makeMethodArshaler(fncs *arshaler, t reflect.Type) *arshaler { } xd := export.Decoder(dec) prevDepth, prevLength := xd.Tokens.DepthLength() + if prevDepth == 1 && xd.AtEOF() { + return io.EOF // check EOF early to avoid fn reporting an EOF + } xd.Flags.Set(jsonflags.WithinArshalCall | 1) unmarshaler, _ := reflect.TypeAssert[UnmarshalerFrom](va.Addr()) err := unmarshaler.UnmarshalJSONFrom(dec) diff --git a/src/encoding/json/v2/arshal_test.go b/src/encoding/json/v2/arshal_test.go index 75093345a3b..dc15c5a5f53 100644 --- a/src/encoding/json/v2/arshal_test.go +++ b/src/encoding/json/v2/arshal_test.go @@ -7833,7 +7833,8 @@ func TestUnmarshal(t *testing.T) { })), wantErr: EU(errSomeError).withType(0, T[unmarshalJSONv2Func]()), }, { - name: jsontest.Name("Methods/Invalid/JSONv2/TooFew"), + name: jsontest.Name("Methods/Invalid/JSONv2/TooFew"), + inBuf: `{}`, inVal: addr(unmarshalJSONv2Func(func(*jsontext.Decoder) error { return nil // do nothing })), @@ -9234,6 +9235,43 @@ func TestUnmarshalReuse(t *testing.T) { }) } +type unmarshalerEOF struct{} + +func (unmarshalerEOF) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + return io.EOF // should be wrapped 
and converted by Unmarshal to io.ErrUnexpectedEOF
+}
+
+// TestUnmarshalEOF verifies that io.EOF is only ever returned by
+// UnmarshalDecode for a top-level value.
+func TestUnmarshalEOF(t *testing.T) {
+	opts := WithUnmarshalers(UnmarshalFromFunc(func(dec *jsontext.Decoder, _ *struct{}) error {
+		return io.EOF // should be wrapped and converted by Unmarshal to io.ErrUnexpectedEOF
+	}))
+
+	for _, in := range []string{"", "[", "[null", "[null]"} {
+		for _, newOut := range []func() any{
+			func() any { return new(unmarshalerEOF) },
+			func() any { return new([]unmarshalerEOF) },
+			func() any { return new(struct{}) },
+			func() any { return new([]struct{}) },
+		} {
+			wantErr := io.ErrUnexpectedEOF
+			if gotErr := Unmarshal([]byte(in), newOut(), opts); !errors.Is(gotErr, wantErr) {
+				t.Errorf("Unmarshal = %v, want %v", gotErr, wantErr)
+			}
+			if gotErr := UnmarshalRead(strings.NewReader(in), newOut(), opts); !errors.Is(gotErr, wantErr) {
+				t.Errorf("UnmarshalRead = %v, want %v", gotErr, wantErr)
+			}
+			switch gotErr := UnmarshalDecode(jsontext.NewDecoder(strings.NewReader(in)), newOut(), opts); {
+			case in != "" && !errors.Is(gotErr, wantErr):
+				t.Errorf("UnmarshalDecode = %v, want %v", gotErr, wantErr)
+			case in == "" && gotErr != io.EOF:
+				t.Errorf("UnmarshalDecode = %v, want %v", gotErr, io.EOF)
+			}
+		}
+	}
+}
+
 type ReaderFunc func([]byte) (int, error)
 
 func (f ReaderFunc) Read(b []byte) (int, error) { return f(b) }
diff --git a/src/encoding/json/v2/errors.go b/src/encoding/json/v2/errors.go
index 9485d7b5277..4895386fe2c 100644
--- a/src/encoding/json/v2/errors.go
+++ b/src/encoding/json/v2/errors.go
@@ -10,6 +10,7 @@ import (
 	"cmp"
 	"errors"
 	"fmt"
+	"io"
 	"reflect"
 	"strconv"
 	"strings"
@@ -28,8 +29,8 @@ import (
 // The name of an unknown JSON object member can be extracted as:
 //
 //	err := ...
-//	var serr json.SemanticError
-//	if errors.As(err, &serr) && serr.Err == json.ErrUnknownName {
+//	serr, ok := errors.AsType[*json.SemanticError](err)
+//	if ok && serr.Err == json.ErrUnknownName {
 //		ptr := serr.JSONPointer // JSON pointer to unknown name
 //		name := ptr.LastToken() // unknown name itself
 //		...
@@ -119,7 +120,7 @@ func newInvalidFormatError(c coder, t reflect.Type) error {
 // newMarshalErrorBefore wraps err in a SemanticError assuming that e
 // is positioned right before the next token or value, which causes an error.
 func newMarshalErrorBefore(e *jsontext.Encoder, t reflect.Type, err error) error {
-	return &SemanticError{action: "marshal", GoType: t, Err: err,
+	return &SemanticError{action: "marshal", GoType: t, Err: toUnexpectedEOF(err),
 		ByteOffset:  e.OutputOffset() + int64(export.Encoder(e).CountNextDelimWhitespace()),
 		JSONPointer: jsontext.Pointer(export.Encoder(e).AppendStackPointer(nil, +1))}
 }
@@ -135,7 +136,7 @@ func newUnmarshalErrorBefore(d *jsontext.Decoder, t reflect.Type, err error) err
 	if export.Decoder(d).Flags.Get(jsonflags.ReportErrorsWithLegacySemantics) {
 		k = d.PeekKind()
 	}
-	return &SemanticError{action: "unmarshal", GoType: t, Err: err,
+	return &SemanticError{action: "unmarshal", GoType: t, Err: toUnexpectedEOF(err),
 		ByteOffset:  d.InputOffset() + int64(export.Decoder(d).CountNextDelimWhitespace()),
 		JSONPointer: jsontext.Pointer(export.Decoder(d).AppendStackPointer(nil, +1)),
 		JSONKind:    k}
@@ -158,7 +159,7 @@
 // is positioned right after the previous token or value, which caused an error. 
func newUnmarshalErrorAfter(d *jsontext.Decoder, t reflect.Type, err error) error { tokOrVal := export.Decoder(d).PreviousTokenOrValue() - return &SemanticError{action: "unmarshal", GoType: t, Err: err, + return &SemanticError{action: "unmarshal", GoType: t, Err: toUnexpectedEOF(err), ByteOffset: d.InputOffset() - int64(len(tokOrVal)), JSONPointer: jsontext.Pointer(export.Decoder(d).AppendStackPointer(nil, -1)), JSONKind: jsontext.Value(tokOrVal).Kind()} @@ -207,6 +208,7 @@ func newSemanticErrorWithPosition(c coder, t reflect.Type, prevDepth int, prevLe if serr == nil { serr = &SemanticError{Err: err} } + serr.Err = toUnexpectedEOF(serr.Err) var currDepth int var currLength int64 var coderState interface{ AppendStackPointer([]byte, int) []byte } @@ -433,3 +435,11 @@ func newDuplicateNameError(ptr jsontext.Pointer, quotedName []byte, offset int64 Err: jsontext.ErrDuplicateName, } } + +// toUnexpectedEOF converts [io.EOF] to [io.ErrUnexpectedEOF]. +func toUnexpectedEOF(err error) error { + if err == io.EOF { + return io.ErrUnexpectedEOF + } + return err +} diff --git a/src/encoding/json/v2/example_test.go b/src/encoding/json/v2/example_test.go index c6bf0a864d8..dc1f06674ce 100644 --- a/src/encoding/json/v2/example_test.go +++ b/src/encoding/json/v2/example_test.go @@ -371,8 +371,8 @@ func Example_unknownMembers() { // Specifying RejectUnknownMembers causes Unmarshal // to reject the presence of any unknown members. err = json.Unmarshal([]byte(input), new(Color), json.RejectUnknownMembers(true)) - var serr *json.SemanticError - if errors.As(err, &serr) && serr.Err == json.ErrUnknownName { + serr, ok := errors.AsType[*json.SemanticError](err) + if ok && serr.Err == json.ErrUnknownName { fmt.Println("Unmarshal error:", serr.Err, strconv.Quote(serr.JSONPointer.LastToken())) } diff --git a/src/encoding/json/v2/options.go b/src/encoding/json/v2/options.go index 0942d2d3078..9685f20f9f8 100644 --- a/src/encoding/json/v2/options.go +++ b/src/encoding/json/v2/options.go @@ -97,9 +97,8 @@ func GetOption[T any](opts Options, setter func(T) Options) (T, bool) { } // DefaultOptionsV2 is the full set of all options that define v2 semantics. -// It is equivalent to all options under [Options], [encoding/json.Options], -// and [encoding/json/jsontext.Options] being set to false or the zero value, -// except for the options related to whitespace formatting. +// It is equivalent to the set of options in [encoding/json.DefaultOptionsV1] +// all being set to false. All other options are not present. func DefaultOptionsV2() Options { return &jsonopts.DefaultOptionsV2 } diff --git a/src/encoding/json/v2_decode.go b/src/encoding/json/v2_decode.go index 1041ec7ee40..f17d7ebccad 100644 --- a/src/encoding/json/v2_decode.go +++ b/src/encoding/json/v2_decode.go @@ -14,6 +14,7 @@ import ( "fmt" "reflect" "strconv" + "strings" "encoding/json/internal/jsonwire" "encoding/json/jsontext" @@ -119,7 +120,20 @@ type UnmarshalTypeError struct { func (e *UnmarshalTypeError) Error() string { var s string if e.Struct != "" || e.Field != "" { - s = "json: cannot unmarshal " + e.Value + " into Go struct field " + e.Struct + "." + e.Field + " of type " + e.Type.String() + // The design of UnmarshalTypeError overly assumes a struct-based + // Go representation for the JSON value. + // The logic in jsontext represents paths using a JSON Pointer, + // which is agnostic to the Go type system. + // Trying to convert a JSON Pointer into a UnmarshalTypeError.Field + // is difficult. 
As a heuristic, if the last path token looks like + // an index into a JSON array (e.g., ".foo.bar.0"), + // avoid the phrase "Go struct field ". + intoWhat := "Go struct field " + i := strings.LastIndexByte(e.Field, '.') + len(".") + if len(e.Field[i:]) > 0 && strings.TrimRight(e.Field[i:], "0123456789") == "" { + intoWhat = "" // likely a Go slice or array + } + s = "json: cannot unmarshal " + e.Value + " into " + intoWhat + e.Struct + "." + e.Field + " of type " + e.Type.String() } else { s = "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() } diff --git a/src/encoding/json/v2_decode_test.go b/src/encoding/json/v2_decode_test.go index f9b0a60f47c..26b4448721e 100644 --- a/src/encoding/json/v2_decode_test.go +++ b/src/encoding/json/v2_decode_test.go @@ -1243,6 +1243,62 @@ var unmarshalTests = []struct { out: (chan int)(nil), err: &UnmarshalTypeError{Value: "number", Type: reflect.TypeFor[chan int]()}, }, + + // #75619 + { + CaseName: Name("QuotedInt/GoSyntax"), + in: `{"X": "-0000123"}`, + ptr: new(struct { + X int64 `json:",string"` + }), + out: struct { + X int64 `json:",string"` + }{-123}, + }, + { + CaseName: Name("QuotedInt/Invalid"), + in: `{"X": "123 "}`, + ptr: new(struct { + X int64 `json:",string"` + }), + err: &UnmarshalTypeError{Value: "number 123 ", Type: reflect.TypeFor[int64](), Field: "X", Offset: int64(len(`{"X": `))}, + }, + { + CaseName: Name("QuotedUint/GoSyntax"), + in: `{"X": "0000123"}`, + ptr: new(struct { + X uint64 `json:",string"` + }), + out: struct { + X uint64 `json:",string"` + }{123}, + }, + { + CaseName: Name("QuotedUint/Invalid"), + in: `{"X": "0x123"}`, + ptr: new(struct { + X uint64 `json:",string"` + }), + err: &UnmarshalTypeError{Value: "number 0x123", Type: reflect.TypeFor[uint64](), Field: "X", Offset: int64(len(`{"X": `))}, + }, + { + CaseName: Name("QuotedFloat/GoSyntax"), + in: `{"X": "0x1_4p-2"}`, + ptr: new(struct { + X float64 `json:",string"` + }), + out: struct { + X float64 `json:",string"` + }{0x1_4p-2}, + }, + { + CaseName: Name("QuotedFloat/Invalid"), + in: `{"X": "1.5e1_"}`, + ptr: new(struct { + X float64 `json:",string"` + }), + err: &UnmarshalTypeError{Value: "number 1.5e1_", Type: reflect.TypeFor[float64](), Field: "X", Offset: int64(len(`{"X": `))}, + }, } func TestMarshal(t *testing.T) { @@ -2307,6 +2363,34 @@ func TestUnmarshalTypeError(t *testing.T) { } } +func TestUnmarshalTypeErrorMessage(t *testing.T) { + err := &UnmarshalTypeError{ + Value: "number 5", + Type: reflect.TypeFor[int](), + Offset: 1234, + Struct: "Root", + } + + for _, tt := range []struct { + field string + want string + }{ + {"", "json: cannot unmarshal number 5 into Go struct field Root. 
of type int"}, + {"1", "json: cannot unmarshal number 5 into Root.1 of type int"}, + {"foo", "json: cannot unmarshal number 5 into Go struct field Root.foo of type int"}, + {"foo.1", "json: cannot unmarshal number 5 into Root.foo.1 of type int"}, + {"foo.bar", "json: cannot unmarshal number 5 into Go struct field Root.foo.bar of type int"}, + {"foo.bar.1", "json: cannot unmarshal number 5 into Root.foo.bar.1 of type int"}, + {"foo.bar.baz", "json: cannot unmarshal number 5 into Go struct field Root.foo.bar.baz of type int"}, + } { + err.Field = tt.field + got := err.Error() + if got != tt.want { + t.Errorf("Error:\n\tgot: %v\n\twant: %v", got, tt.want) + } + } +} + func TestUnmarshalSyntax(t *testing.T) { var x any tests := []struct { diff --git a/src/encoding/json/v2_options.go b/src/encoding/json/v2_options.go index 4dea88ad7ed..2bdec86fdea 100644 --- a/src/encoding/json/v2_options.go +++ b/src/encoding/json/v2_options.go @@ -227,9 +227,7 @@ type Options = jsonopts.Options // - [jsontext.EscapeForJS] // - [jsontext.PreserveRawStrings] // -// All other boolean options are set to false. -// All non-boolean options are set to the zero value, -// except for [jsontext.WithIndent], which defaults to "\t". +// All other options are not present. // // The [Marshal] and [Unmarshal] functions in this package are // semantically identical to calling the v2 equivalents with this option: @@ -506,7 +504,9 @@ func ReportErrorsWithLegacySemantics(v bool) Options { // When marshaling, such Go values are serialized as their usual // JSON representation, but quoted within a JSON string. // When unmarshaling, such Go values must be deserialized from -// a JSON string containing their usual JSON representation. +// a JSON string containing their usual JSON representation or +// Go number representation for that numeric kind. +// Note that the Go number grammar is a superset of the JSON number grammar. // A JSON null quoted in a JSON string is a valid substitute for JSON null // while unmarshaling into a Go value that `string` takes effect on. // diff --git a/src/encoding/pem/pem.go b/src/encoding/pem/pem.go index dcc7416ee21..6bf2b41ad0e 100644 --- a/src/encoding/pem/pem.go +++ b/src/encoding/pem/pem.go @@ -37,7 +37,7 @@ type Block struct { // line bytes. The remainder of the byte array (also not including the new line // bytes) is also returned and this will always be smaller than the original // argument. -func getLine(data []byte) (line, rest []byte) { +func getLine(data []byte) (line, rest []byte, consumed int) { i := bytes.IndexByte(data, '\n') var j int if i < 0 { @@ -49,7 +49,7 @@ func getLine(data []byte) (line, rest []byte) { i-- } } - return bytes.TrimRight(data[0:i], " \t"), data[j:] + return bytes.TrimRight(data[0:i], " \t"), data[j:], j } // removeSpacesAndTabs returns a copy of its input with all spaces and tabs @@ -90,17 +90,37 @@ func Decode(data []byte) (p *Block, rest []byte) { // pemStart begins with a newline. However, at the very beginning of // the byte array, we'll accept the start string without it. rest = data + + endTrailerIndex := 0 for { - if bytes.HasPrefix(rest, pemStart[1:]) { - rest = rest[len(pemStart)-1:] - } else if _, after, ok := bytes.Cut(rest, pemStart); ok { - rest = after - } else { + // If we've already tried parsing a block, skip past the END we already + // saw. + if endTrailerIndex < 0 || endTrailerIndex > len(rest) { return nil, data } + rest = rest[endTrailerIndex:] + + // Find the first END line, and then find the last BEGIN line before + // the end line. 
This lets us skip any repeated BEGIN lines that don't + // have a matching END. + endIndex := bytes.Index(rest, pemEnd) + if endIndex < 0 { + return nil, data + } + endTrailerIndex = endIndex + len(pemEnd) + beginIndex := bytes.LastIndex(rest[:endIndex], pemStart[1:]) + if beginIndex < 0 || (beginIndex > 0 && rest[beginIndex-1] != '\n') { + continue + } + rest = rest[beginIndex+len(pemStart)-1:] + endIndex -= beginIndex + len(pemStart) - 1 + endTrailerIndex -= beginIndex + len(pemStart) - 1 var typeLine []byte - typeLine, rest = getLine(rest) + var consumed int + typeLine, rest, consumed = getLine(rest) + endIndex -= consumed + endTrailerIndex -= consumed if !bytes.HasSuffix(typeLine, pemEndOfLine) { continue } @@ -117,7 +137,7 @@ func Decode(data []byte) (p *Block, rest []byte) { if len(rest) == 0 { return nil, data } - line, next := getLine(rest) + line, next, consumed := getLine(rest) key, val, ok := bytes.Cut(line, colon) if !ok { @@ -129,21 +149,13 @@ func Decode(data []byte) (p *Block, rest []byte) { val = bytes.TrimSpace(val) p.Headers[string(key)] = string(val) rest = next + endIndex -= consumed + endTrailerIndex -= consumed } - var endIndex, endTrailerIndex int - - // If there were no headers, the END line might occur - // immediately, without a leading newline. - if len(p.Headers) == 0 && bytes.HasPrefix(rest, pemEnd[1:]) { - endIndex = 0 - endTrailerIndex = len(pemEnd) - 1 - } else { - endIndex = bytes.Index(rest, pemEnd) - endTrailerIndex = endIndex + len(pemEnd) - } - - if endIndex < 0 { + // If there were headers, there must be a newline between the headers + // and the END line, so endIndex should be >= 0. + if len(p.Headers) > 0 && endIndex < 0 { continue } @@ -163,21 +175,24 @@ func Decode(data []byte) (p *Block, rest []byte) { } // The line must end with only whitespace. - if s, _ := getLine(restOfEndLine); len(s) != 0 { + if s, _, _ := getLine(restOfEndLine); len(s) != 0 { continue } - base64Data := removeSpacesAndTabs(rest[:endIndex]) - p.Bytes = make([]byte, base64.StdEncoding.DecodedLen(len(base64Data))) - n, err := base64.StdEncoding.Decode(p.Bytes, base64Data) - if err != nil { - continue + p.Bytes = []byte{} + if endIndex > 0 { + base64Data := removeSpacesAndTabs(rest[:endIndex]) + p.Bytes = make([]byte, base64.StdEncoding.DecodedLen(len(base64Data))) + n, err := base64.StdEncoding.Decode(p.Bytes, base64Data) + if err != nil { + continue + } + p.Bytes = p.Bytes[:n] } - p.Bytes = p.Bytes[:n] // the -1 is because we might have only matched pemEnd without the // leading newline if the PEM block was empty. 
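
Note: the rewritten loop anchors each attempt on an END line and then takes the last BEGIN line before it, so stray or repeated BEGIN lines no longer hide a valid block. The behavior is pinned down by the new TestDecodeStrangeCases below; a small standalone check of the "multiple BEGIN" case:

    package main

    import (
        "encoding/pem"
        "fmt"
    )

    func main() {
        // Two BEGIN lines and one END: Decode pairs the END with the
        // closest preceding BEGIN and still yields the block.
        data := []byte("-----BEGIN TEST BLOCK-----\n" +
            "-----BEGIN TEST BLOCK-----\n" +
            "aGVsbG8=\n" + // base64 for "hello"
            "-----END TEST BLOCK-----\n")
        block, _ := pem.Decode(data)
        fmt.Printf("%s %q\n", block.Type, block.Bytes) // TEST BLOCK "hello"
    }
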
- _, rest = getLine(rest[endIndex+len(pemEnd)-1:]) + _, rest, _ = getLine(rest[endIndex+len(pemEnd)-1:]) return p, rest } } diff --git a/src/encoding/pem/pem_test.go b/src/encoding/pem/pem_test.go index e252ffd8ed1..fa6e8ba62bd 100644 --- a/src/encoding/pem/pem_test.go +++ b/src/encoding/pem/pem_test.go @@ -34,7 +34,7 @@ var getLineTests = []GetLineTest{ func TestGetLine(t *testing.T) { for i, test := range getLineTests { - x, y := getLine([]byte(test.in)) + x, y, _ := getLine([]byte(test.in)) if string(x) != test.out1 || string(y) != test.out2 { t.Errorf("#%d got:%+v,%+v want:%s,%s", i, x, y, test.out1, test.out2) } @@ -46,6 +46,7 @@ func TestDecode(t *testing.T) { if !reflect.DeepEqual(result, certificate) { t.Errorf("#0 got:%#v want:%#v", result, certificate) } + result, remainder = Decode(remainder) if !reflect.DeepEqual(result, privateKey) { t.Errorf("#1 got:%#v want:%#v", result, privateKey) @@ -68,7 +69,7 @@ func TestDecode(t *testing.T) { } result, remainder = Decode(remainder) - if result == nil || result.Type != "HEADERS" || len(result.Headers) != 1 { + if result == nil || result.Type != "VALID HEADERS" || len(result.Headers) != 1 { t.Errorf("#5 expected single header block but got :%v", result) } @@ -381,15 +382,15 @@ ZWAaUoVtWIQ52aKS0p19G99hhb+IVANC4akkdHV4SP8i7MVNZhfUmg== # This shouldn't be recognised because of the missing newline after the headers. ------BEGIN HEADERS----- +-----BEGIN INVALID HEADERS----- Header: 1 ------END HEADERS----- +-----END INVALID HEADERS----- # This should be valid, however. ------BEGIN HEADERS----- +-----BEGIN VALID HEADERS----- Header: 1 ------END HEADERS-----`) +-----END VALID HEADERS-----`) var certificate = &Block{Type: "CERTIFICATE", Headers: map[string]string{}, @@ -638,3 +639,104 @@ func TestBadEncode(t *testing.T) { } func testingKey(s string) string { return strings.ReplaceAll(s, "TESTING KEY", "PRIVATE KEY") } + +func TestDecodeStrangeCases(t *testing.T) { + sentinelType := "TEST BLOCK" + sentinelBytes := []byte("hello") + for _, tc := range []struct { + name string + pem string + }{ + { + name: "invalid section (not base64)", + pem: `-----BEGIN COMMENT----- +foo foo foo +-----END COMMENT----- +-----BEGIN TEST BLOCK----- +aGVsbG8= +-----END TEST BLOCK-----`, + }, + { + name: "leading garbage on block", + pem: `foo foo foo-----BEGIN CERTIFICATE----- +MCowBQYDK2VwAyEApVjJeLW5MoP6uR3+OeITokM+rBDng6dgl1vvhcy+wws= +-----END PUBLIC KEY----- +-----BEGIN TEST BLOCK----- +aGVsbG8= +-----END TEST BLOCK-----`, + }, + { + name: "leading garbage", + pem: `foo foo foo +-----BEGIN TEST BLOCK----- +aGVsbG8= +-----END TEST BLOCK-----`, + }, + { + name: "leading partial block", + pem: `foo foo foo +-----END COMMENT----- +-----BEGIN TEST BLOCK----- +aGVsbG8= +-----END TEST BLOCK-----`, + }, + { + name: "multiple BEGIN", + pem: `-----BEGIN TEST BLOCK----- +-----BEGIN TEST BLOCK----- +-----BEGIN TEST BLOCK----- +aGVsbG8= +-----END TEST BLOCK-----`, + }, + { + name: "multiple END", + pem: `-----BEGIN TEST BLOCK----- +aGVsbG8= +-----END TEST BLOCK----- +-----END TEST BLOCK----- +-----END TEST BLOCK-----`, + }, + { + name: "leading malformed BEGIN", + pem: `-----BEGIN PUBLIC KEY +aGVsbG8= +-----END PUBLIC KEY----- +-----BEGIN TEST BLOCK----- +aGVsbG8= +-----END TEST BLOCK-----`, + }, + } { + t.Run(tc.name, func(t *testing.T) { + block, _ := Decode([]byte(tc.pem)) + if block == nil { + t.Fatal("expected valid block") + } + if block.Type != sentinelType { + t.Fatalf("unexpected block returned, got type %q, want type %q", block.Type, sentinelType) + } + if 
!bytes.Equal(block.Bytes, sentinelBytes) { + t.Fatalf("unexpected block content, got %x, want %x", block.Bytes, sentinelBytes) + } + }) + } +} + +func TestJustEnd(t *testing.T) { + pemData := ` +-----END PUBLIC KEY-----` + + block, _ := Decode([]byte(pemData)) + if block != nil { + t.Fatal("unexpected block") + } +} + +func FuzzDecode(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + Decode(data) + }) +} + +func TestMissingEndTrailer(t *testing.T) { + Decode([]byte{0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0xa, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x45, 0x4e, 0x44, 0x20}) +} diff --git a/src/encoding/xml/marshal_test.go b/src/encoding/xml/marshal_test.go index b8bce7170a6..6c7e711aac0 100644 --- a/src/encoding/xml/marshal_test.go +++ b/src/encoding/xml/marshal_test.go @@ -2561,7 +2561,6 @@ var closeTests = []struct { func TestClose(t *testing.T) { for _, tt := range closeTests { - tt := tt t.Run(tt.desc, func(t *testing.T) { var out strings.Builder enc := NewEncoder(&out) diff --git a/src/errors/wrap.go b/src/errors/wrap.go index 2ebb951f1de..e4a5ca33d5e 100644 --- a/src/errors/wrap.go +++ b/src/errors/wrap.go @@ -25,6 +25,7 @@ func Unwrap(err error) error { } // Is reports whether any error in err's tree matches target. +// The target must be comparable. // // The tree consists of err itself, followed by the errors obtained by repeatedly // calling its Unwrap() error or Unwrap() []error method. When err wraps multiple diff --git a/src/fmt/errors.go b/src/fmt/errors.go index 1ac83404bc7..a0ce7ada346 100644 --- a/src/fmt/errors.go +++ b/src/fmt/errors.go @@ -6,6 +6,7 @@ package fmt import ( "errors" + "internal/stringslite" "slices" ) @@ -19,7 +20,22 @@ import ( // order they appear in the arguments. // It is invalid to supply the %w verb with an operand that does not implement // the error interface. The %w verb is otherwise a synonym for %v. -func Errorf(format string, a ...any) error { +func Errorf(format string, a ...any) (err error) { + // This function has been split in a somewhat unnatural way + // so that both it and the errors.New call can be inlined. + if err = errorf(format, a...); err != nil { + return err + } + // No formatting was needed. We can avoid some allocations and other work. + // See https://go.dev/cl/708836 for details. + return errors.New(format) +} + +// errorf formats and returns an error value, or nil if no formatting is required. 
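
Note: splitting Errorf so that both it and the errors.New call can inline means a verb-free call with no arguments no longer pays for the fmt printer at all (see the new mallocTest entries below). What the fast path buys, as a sketch:

    package main

    import (
        "errors"
        "fmt"
    )

    func main() {
        // No verbs and no arguments: with this change, effectively
        // equivalent to errors.New, skipping the printer entirely.
        plain := fmt.Errorf("connection reset")

        // With verbs, the full formatting path still runs,
        // including %w wrapping.
        wrapped := fmt.Errorf("dial failed: %w", plain)
        fmt.Println(errors.Is(wrapped, plain)) // true
    }
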
+func errorf(format string, a ...any) error { + if len(a) == 0 && stringslite.IndexByte(format, '%') == -1 { + return nil + } p := newPrinter() p.wrapErrs = true p.doPrintf(format, a) diff --git a/src/fmt/errors_test.go b/src/fmt/errors_test.go index 4eb55faffe7..52bf42d0a62 100644 --- a/src/fmt/errors_test.go +++ b/src/fmt/errors_test.go @@ -54,6 +54,15 @@ func TestErrorf(t *testing.T) { }, { err: noVetErrorf("%w is not an error", "not-an-error"), wantText: "%!w(string=not-an-error) is not an error", + }, { + err: fmt.Errorf("no verbs"), + wantText: "no verbs", + }, { + err: noVetErrorf("no verbs with extra arg", "extra"), + wantText: "no verbs with extra arg%!(EXTRA string=extra)", + }, { + err: noVetErrorf("too many verbs: %w %v"), + wantText: "too many verbs: %!w(MISSING) %!v(MISSING)", }, { err: noVetErrorf("wrapped two errors: %w %w", errString("1"), errString("2")), wantText: "wrapped two errors: 1 2", diff --git a/src/fmt/fmt_test.go b/src/fmt/fmt_test.go index 86e458ae648..c07da5683c2 100644 --- a/src/fmt/fmt_test.go +++ b/src/fmt/fmt_test.go @@ -1480,6 +1480,7 @@ func BenchmarkFprintIntNoAlloc(b *testing.B) { var mallocBuf bytes.Buffer var mallocPointer *int // A pointer so we know the interface value won't allocate. +var sink any var mallocTest = []struct { count int @@ -1510,6 +1511,10 @@ var mallocTest = []struct { mallocBuf.Reset() Fprintf(&mallocBuf, "%x %x %x", mallocPointer, mallocPointer, mallocPointer) }}, + {0, `Errorf("hello")`, func() { _ = Errorf("hello") }}, + {2, `Errorf("hello: %x")`, func() { _ = Errorf("hello: %x", mallocPointer) }}, + {1, `sink = Errorf("hello")`, func() { sink = Errorf("hello") }}, + {2, `sink = Errorf("hello: %x")`, func() { sink = Errorf("hello: %x", mallocPointer) }}, } var _ bytes.Buffer diff --git a/src/go.mod b/src/go.mod index f134f0c7b57..c5e901b9ef2 100644 --- a/src/go.mod +++ b/src/go.mod @@ -3,11 +3,11 @@ module std go 1.26 require ( - golang.org/x/crypto v0.42.0 - golang.org/x/net v0.44.1-0.20251002015445-edb764c2296f + golang.org/x/crypto v0.43.0 + golang.org/x/net v0.46.0 ) require ( - golang.org/x/sys v0.36.0 // indirect - golang.org/x/text v0.29.0 // indirect + golang.org/x/sys v0.37.0 // indirect + golang.org/x/text v0.30.0 // indirect ) diff --git a/src/go.sum b/src/go.sum index f24bea029a2..4a52682161f 100644 --- a/src/go.sum +++ b/src/go.sum @@ -1,8 +1,8 @@ -golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= -golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= -golang.org/x/net v0.44.1-0.20251002015445-edb764c2296f h1:vNklv+oJQSYNGsWXHoCPi2MHMcpj9/Q7aBhvvfnJvGg= -golang.org/x/net v0.44.1-0.20251002015445-edb764c2296f/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= -golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= -golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= -golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= +golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= +golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= +golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= +golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= +golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= +golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/text v0.30.0 
h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= +golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= diff --git a/src/go/ast/ast.go b/src/go/ast/ast.go index 9aca39e868f..a6dab5bb517 100644 --- a/src/go/ast/ast.go +++ b/src/go/ast/ast.go @@ -134,10 +134,10 @@ func (g *CommentGroup) Text() string { } // Split on newlines. - cl := strings.Split(c, "\n") + cl := strings.SplitSeq(c, "\n") // Walk lines, stripping trailing white space and adding to list. - for _, l := range cl { + for l := range cl { lines = append(lines, stripTrailingWhitespace(l)) } } diff --git a/src/go/ast/directive.go b/src/go/ast/directive.go new file mode 100644 index 00000000000..901ed0ba0e0 --- /dev/null +++ b/src/go/ast/directive.go @@ -0,0 +1,179 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ast + +import ( + "fmt" + "go/token" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// A Directive is a comment of this form: +// +// //tool:name args +// +// For example, this directive: +// +// //go:generate stringer -type Op -trimprefix Op +// +// would have Tool "go", Name "generate", and Args "stringer -type Op +// -trimprefix Op". +// +// While Args does not have a strict syntax, by convention it is a +// space-separated sequence of unquoted words, '"'-quoted Go strings, or +// '`'-quoted raw strings. +// +// See https://go.dev/doc/comment#directives for specification. +type Directive struct { + Tool string + Name string + Args string // no leading or trailing whitespace + + // Slash is the position of the "//" at the beginning of the directive. + Slash token.Pos + + // ArgsPos is the position where Args begins, based on the position passed + // to ParseDirective. + ArgsPos token.Pos +} + +// ParseDirective parses a single comment line for a directive comment. +// +// If the line is not a directive comment, it returns false. +// +// The provided text must be a single line and should include the leading "//". +// If the text does not start with "//", it returns false. +// +// The caller may provide a file position of the start of c. This will be used +// to track the position of the arguments. This may be [Comment.Slash], +// synthesized by the caller, or simply 0. If the caller passes 0, then the +// positions are effectively byte offsets into the string c. +func ParseDirective(pos token.Pos, c string) (Directive, bool) { + // Fast path to eliminate most non-directive comments. Must be a line + // comment starting with [a-z0-9] + if !(len(c) >= 3 && c[0] == '/' && c[1] == '/' && isalnum(c[2])) { + return Directive{}, false + } + + buf := directiveScanner{c, pos} + buf.skip(len("//")) + + // Check for a valid directive and parse tool part. + // + // This logic matches isDirective. (We could combine them, but isDirective + // itself is duplicated in several places.) + colon := strings.Index(buf.str, ":") + if colon <= 0 || colon+1 >= len(buf.str) { + return Directive{}, false + } + for i := 0; i <= colon+1; i++ { + if i == colon { + continue + } + if !isalnum(buf.str[i]) { + return Directive{}, false + } + } + tool := buf.take(colon) + buf.skip(len(":")) + + // Parse name and args. 
+ name := buf.takeNonSpace() + buf.skipSpace() + argsPos := buf.pos + args := strings.TrimRightFunc(buf.str, unicode.IsSpace) + + return Directive{tool, name, args, pos, argsPos}, true +} + +func isalnum(b byte) bool { + return 'a' <= b && b <= 'z' || '0' <= b && b <= '9' +} + +func (d *Directive) Pos() token.Pos { return d.Slash } +func (d *Directive) End() token.Pos { return token.Pos(int(d.ArgsPos) + len(d.Args)) } + +// A DirectiveArg is an argument to a directive comment. +type DirectiveArg struct { + // Arg is the parsed argument string. If the argument was a quoted string, + // this is its unquoted form. + Arg string + // Pos is the position of the first character in this argument. + Pos token.Pos +} + +// ParseArgs parses a [Directive]'s arguments using the standard convention, +// which is a sequence of tokens, where each token may be a bare word, or a +// double quoted Go string, or a back quoted raw Go string. Each token must be +// separated by one or more Unicode spaces. +// +// If the arguments do not conform to this syntax, it returns an error. +func (d *Directive) ParseArgs() ([]DirectiveArg, error) { + args := directiveScanner{d.Args, d.ArgsPos} + + list := []DirectiveArg{} + for args.skipSpace(); args.str != ""; args.skipSpace() { + var arg string + argPos := args.pos + + switch args.str[0] { + default: + arg = args.takeNonSpace() + + case '`', '"': + q, err := strconv.QuotedPrefix(args.str) + if err != nil { // Always strconv.ErrSyntax + return nil, fmt.Errorf("invalid quoted string in //%s:%s: %s", d.Tool, d.Name, args.str) + } + // Any errors will have been returned by QuotedPrefix + arg, _ = strconv.Unquote(args.take(len(q))) + + // Check that the quoted string is followed by a space (or nothing) + if args.str != "" { + r, _ := utf8.DecodeRuneInString(args.str) + if !unicode.IsSpace(r) { + return nil, fmt.Errorf("invalid quoted string in //%s:%s: %s", d.Tool, d.Name, args.str) + } + } + } + + list = append(list, DirectiveArg{arg, argPos}) + } + return list, nil +} + +// directiveScanner is a helper for parsing directive comments while maintaining +// position information. +type directiveScanner struct { + str string + pos token.Pos +} + +func (s *directiveScanner) skip(n int) { + s.pos += token.Pos(n) + s.str = s.str[n:] +} + +func (s *directiveScanner) take(n int) string { + res := s.str[:n] + s.skip(n) + return res +} + +func (s *directiveScanner) takeNonSpace() string { + i := strings.IndexFunc(s.str, unicode.IsSpace) + if i == -1 { + i = len(s.str) + } + return s.take(i) +} + +func (s *directiveScanner) skipSpace() { + trim := strings.TrimLeftFunc(s.str, unicode.IsSpace) + s.skip(len(s.str) - len(trim)) +} diff --git a/src/go/ast/directive_test.go b/src/go/ast/directive_test.go new file mode 100644 index 00000000000..ffabe584cb8 --- /dev/null +++ b/src/go/ast/directive_test.go @@ -0,0 +1,251 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ast + +import ( + "go/token" + "reflect" + "strings" + "testing" +) + +func TestParseDirectiveMatchesIsDirective(t *testing.T) { + for _, tt := range isDirectiveTests { + want := tt.ok + if strings.HasPrefix(tt.in, "extern ") || strings.HasPrefix(tt.in, "export ") { + // ParseDirective does NOT support extern or export, unlike + // isDirective. 
+ want = false + } + + if _, ok := ParseDirective(0, "//"+tt.in); ok != want { + t.Errorf("ParseDirective(0, %q) = %v, want %v", "// "+tt.in, ok, want) + } + } +} + +func TestParseDirective(t *testing.T) { + for _, test := range []struct { + name string + in string + pos token.Pos + want Directive + wantOK bool + }{ + { + name: "valid", + in: "//go:generate stringer -type Op -trimprefix Op", + pos: 10, + want: Directive{ + Tool: "go", + Name: "generate", + Args: "stringer -type Op -trimprefix Op", + Slash: 10, + ArgsPos: token.Pos(10 + len("//go:generate ")), + }, + wantOK: true, + }, + { + name: "no args", + in: "//go:build ignore", + pos: 20, + want: Directive{ + Tool: "go", + Name: "build", + Args: "ignore", + Slash: 20, + ArgsPos: token.Pos(20 + len("//go:build ")), + }, + wantOK: true, + }, + { + name: "not a directive", + in: "// not a directive", + pos: 30, + wantOK: false, + }, + { + name: "not a comment", + in: "go:generate", + pos: 40, + wantOK: false, + }, + { + name: "empty", + in: "", + pos: 50, + wantOK: false, + }, + { + name: "just slashes", + in: "//", + pos: 60, + wantOK: false, + }, + { + name: "no name", + in: "//go:", + pos: 70, + wantOK: false, + }, + { + name: "no tool", + in: "//:generate", + pos: 80, + wantOK: false, + }, + { + name: "multiple spaces", + in: "//go:build foo bar", + pos: 90, + want: Directive{ + Tool: "go", + Name: "build", + Args: "foo bar", + Slash: 90, + ArgsPos: token.Pos(90 + len("//go:build ")), + }, + wantOK: true, + }, + { + name: "trailing space", + in: "//go:build foo ", + pos: 100, + want: Directive{ + Tool: "go", + Name: "build", + Args: "foo", + Slash: 100, + ArgsPos: token.Pos(100 + len("//go:build ")), + }, + wantOK: true, + }, + } { + t.Run(test.name, func(t *testing.T) { + got, gotOK := ParseDirective(test.pos, test.in) + if gotOK != test.wantOK { + t.Fatalf("ParseDirective(%q) ok = %v, want %v", test.in, gotOK, test.wantOK) + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("ParseDirective(%q) = %+v, want %+v", test.in, got, test.want) + } + }) + } +} + +func TestParseArgs(t *testing.T) { + for _, test := range []struct { + name string + in Directive + want []DirectiveArg + wantErr bool + }{ + { + name: "simple", + in: Directive{ + Tool: "go", + Name: "generate", + Args: "stringer -type Op", + ArgsPos: 10, + }, + want: []DirectiveArg{ + {"stringer", 10}, + {"-type", token.Pos(10 + len("stringer "))}, + {"Op", token.Pos(10 + len("stringer -type "))}, + }, + }, + { + name: "quoted", + in: Directive{ + Tool: "go", + Name: "generate", + Args: "\"foo bar\" baz", + ArgsPos: 10, + }, + want: []DirectiveArg{ + {"foo bar", 10}, + {"baz", token.Pos(10 + len("\"foo bar\" "))}, + }, + }, + { + name: "raw quoted", + in: Directive{ + Tool: "go", + Name: "generate", + Args: "`foo bar` baz", + ArgsPos: 10, + }, + want: []DirectiveArg{ + {"foo bar", 10}, + {"baz", token.Pos(10 + len("`foo bar` "))}, + }, + }, + { + name: "escapes", + in: Directive{ + Tool: "go", + Name: "generate", + Args: "\"foo\\U0001F60Abar\" `a\\tb`", + ArgsPos: 10, + }, + want: []DirectiveArg{ + {"foo😊bar", 10}, + {"a\\tb", token.Pos(10 + len("\"foo\\U0001F60Abar\" "))}, + }, + }, + { + name: "empty args", + in: Directive{ + Tool: "go", + Name: "build", + Args: "", + ArgsPos: 10, + }, + want: []DirectiveArg{}, + }, + { + name: "spaces", + in: Directive{ + Tool: "go", + Name: "build", + Args: " foo bar ", + ArgsPos: 10, + }, + want: []DirectiveArg{ + {"foo", token.Pos(10 + len(" "))}, + {"bar", token.Pos(10 + len(" foo "))}, + }, + }, + { + name: "unterminated quote", + 
in: Directive{ + Tool: "go", + Name: "generate", + Args: "`foo", + }, + wantErr: true, + }, + { + name: "no space after quote", + in: Directive{ + Tool: "go", + Name: "generate", + Args: `"foo"bar`, + }, + wantErr: true, + }, + } { + t.Run(test.name, func(t *testing.T) { + got, err := test.in.ParseArgs() + if err != nil && !test.wantErr { + t.Errorf("got ParseArgs(%+v) = error %s; want %+v", test.in, err, test.want) + } else if err == nil && test.wantErr { + t.Errorf("got ParseArgs(%+v) = %+v; want error", test.in, got) + } else if err == nil && !reflect.DeepEqual(got, test.want) { + t.Errorf("got ParseArgs(%+v) = %+v; want %+v", test.in, got, test.want) + } + }) + } +} diff --git a/src/go/build/build.go b/src/go/build/build.go index 76866c7487a..68fb8dbbd7a 100644 --- a/src/go/build/build.go +++ b/src/go/build/build.go @@ -1139,7 +1139,7 @@ func (ctxt *Context) importGo(p *Package, path, srcDir string, mode ImportMode) // we must not being doing special things like AllowBinary or IgnoreVendor, // and all the file system callbacks must be nil (we're meant to use the local file system). if mode&AllowBinary != 0 || mode&IgnoreVendor != 0 || - ctxt.JoinPath != nil || ctxt.SplitPathList != nil || ctxt.IsAbsPath != nil || ctxt.IsDir != nil || ctxt.HasSubdir != nil || ctxt.ReadDir != nil || ctxt.OpenFile != nil || !equal(ctxt.ToolTags, defaultToolTags) || !equal(ctxt.ReleaseTags, defaultReleaseTags) { + ctxt.JoinPath != nil || ctxt.SplitPathList != nil || ctxt.IsAbsPath != nil || ctxt.IsDir != nil || ctxt.HasSubdir != nil || ctxt.ReadDir != nil || ctxt.OpenFile != nil || !slices.Equal(ctxt.ToolTags, defaultToolTags) || !slices.Equal(ctxt.ReleaseTags, defaultReleaseTags) { return errNoModules } @@ -1279,18 +1279,6 @@ func (ctxt *Context) importGo(p *Package, path, srcDir string, mode ImportMode) return nil } -func equal(x, y []string) bool { - if len(x) != len(y) { - return false - } - for i, xi := range x { - if xi != y[i] { - return false - } - } - return true -} - // hasGoFiles reports whether dir contains any files with names ending in .go. // For a vendor check we must exclude directories that contain no .go files. // Otherwise it is not possible to vendor just a/b/c and still import the diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go index 93abfd394af..8db0b5e92eb 100644 --- a/src/go/build/deps_test.go +++ b/src/go/build/deps_test.go @@ -49,7 +49,6 @@ var depsRules = ` internal/coverage/uleb128, internal/coverage/calloc, internal/cpu, - internal/ftoa, internal/goarch, internal/godebugs, internal/goexperiment, @@ -71,8 +70,9 @@ var depsRules = ` internal/goarch < internal/abi; internal/byteorder, internal/cpu, internal/goarch < internal/chacha8rand; + internal/goarch, math/bits < internal/strconv; - internal/cpu, internal/ftoa, internal/itoa < simd; + internal/cpu, internal/strconv < simd; # RUNTIME is the core runtime group of packages, all of them very light-weight. 
internal/abi, @@ -85,6 +85,7 @@ var depsRules = ` internal/goos, internal/itoa, internal/profilerecord, + internal/strconv, internal/trace/tracev2, math/bits, structs @@ -102,7 +103,6 @@ var depsRules = ` < internal/runtime/gc < internal/runtime/math < internal/runtime/maps - < internal/runtime/strconv < internal/runtime/cgroup < internal/runtime/gc/scan < runtime @@ -177,7 +177,7 @@ var depsRules = ` MATH < runtime/metrics; - MATH, unicode/utf8, internal/ftoa + MATH, unicode/utf8 < strconv; unicode !< strconv; @@ -241,7 +241,6 @@ var depsRules = ` internal/types/errors, mime/quotedprintable, net/internal/socktest, - net/url, runtime/trace, text/scanner, text/tabwriter; @@ -304,6 +303,12 @@ var depsRules = ` FMT < text/template/parse; + internal/bytealg, math/bits, slices, strconv, unique + < net/netip; + + FMT, net/netip + < net/url; + net/url, text/template/parse < text/template < internal/lazytemplate; @@ -418,9 +423,6 @@ var depsRules = ` < golang.org/x/net/dns/dnsmessage, golang.org/x/net/lif; - internal/bytealg, internal/itoa, math/bits, slices, strconv, unique - < net/netip; - os, net/netip < internal/routebsd; @@ -481,6 +483,8 @@ var depsRules = ` io, math/rand/v2 < crypto/internal/randutil; + NONE < crypto/internal/constanttime; + STR < crypto/internal/impl; OS < crypto/internal/sysrand @@ -492,13 +496,14 @@ var depsRules = ` time, internal/syscall/windows < crypto/internal/fips140deps/time; crypto/internal/fips140deps/time, errors, math/bits, sync/atomic, unsafe - < crypto/internal/fips140/entropy; + < crypto/internal/entropy/v1.0.0; STR, hash, crypto/internal/impl, crypto/internal/entropy, crypto/internal/randutil, - crypto/internal/fips140/entropy, + crypto/internal/constanttime, + crypto/internal/entropy/v1.0.0, crypto/internal/fips140deps/byteorder, crypto/internal/fips140deps/cpu, crypto/internal/fips140deps/godebug @@ -568,7 +573,7 @@ var depsRules = ` # CRYPTO-MATH is crypto that exposes math/big APIs - no cgo, net; fmt now ok. - CRYPTO, FMT, math/big + CRYPTO, FMT, math/big, internal/saferio < crypto/internal/boring/bbig < crypto/internal/fips140cache < crypto/rand @@ -808,7 +813,7 @@ var depsRules = ` FMT, testing < internal/cgrouptest; - regexp, internal/trace, internal/trace/raw, internal/txtar, testing + regexp, internal/testenv, internal/trace, internal/trace/raw, internal/txtar, testing < internal/trace/testtrace; C, CGO diff --git a/src/go/build/read.go b/src/go/build/read.go index 1273066dbc7..3185cf09bec 100644 --- a/src/go/build/read.go +++ b/src/go/build/read.go @@ -89,37 +89,24 @@ func (r *importReader) readByte() byte { return c } -// readByteNoBuf is like readByte but doesn't buffer the byte. -// It exhausts r.buf before reading from r.b. -func (r *importReader) readByteNoBuf() byte { - var c byte - var err error - if len(r.buf) > 0 { - c = r.buf[0] - r.buf = r.buf[1:] - } else { - c, err = r.b.ReadByte() - if err == nil && c == 0 { - err = errNUL +// readRest reads the entire rest of the file into r.buf. 
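
Note: readRest (below) replaces the byte-at-a-time readByteNoBuf scan removed above. The append(r.buf, 0)[:len(r.buf)] step grows capacity while keeping the logical length, so each Read lands directly in the spare capacity between len and cap. The same idiom in isolation, as a sketch:

    package main

    import (
        "fmt"
        "io"
        "strings"
    )

    // readAll mirrors the readRest loop: grow via append, then read into
    // the gap between len and cap.
    func readAll(r io.Reader) ([]byte, error) {
        var buf []byte
        for {
            if len(buf) == cap(buf) {
                buf = append(buf, 0)[:len(buf)] // grow, keep length
            }
            n, err := r.Read(buf[len(buf):cap(buf)])
            buf = buf[:len(buf)+n]
            if err == io.EOF {
                return buf, nil
            }
            if err != nil {
                return buf, err
            }
        }
    }

    func main() {
        b, _ := readAll(strings.NewReader("package p\n"))
        fmt.Printf("%q\n", b)
    }
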
+func (r *importReader) readRest() { + for { + if len(r.buf) == cap(r.buf) { + // Grow the buffer + r.buf = append(r.buf, 0)[:len(r.buf)] + } + n, err := r.b.Read(r.buf[len(r.buf):cap(r.buf)]) + r.buf = r.buf[:len(r.buf)+n] + if err != nil { + if err == io.EOF { + r.eof = true + } else if r.err == nil { + r.err = err + } + break } } - - if err != nil { - if err == io.EOF { - r.eof = true - } else if r.err == nil { - r.err = err - } - return 0 - } - r.pos.Offset++ - if c == '\n' { - r.pos.Line++ - r.pos.Column = 1 - } else { - r.pos.Column++ - } - return c } // peekByte returns the next byte from the input reader but does not advance beyond it. @@ -182,130 +169,6 @@ func (r *importReader) nextByte(skipSpace bool) byte { return c } -var goEmbed = []byte("go:embed") - -// findEmbed advances the input reader to the next //go:embed comment. -// It reports whether it found a comment. -// (Otherwise it found an error or EOF.) -func (r *importReader) findEmbed(first bool) bool { - // The import block scan stopped after a non-space character, - // so the reader is not at the start of a line on the first call. - // After that, each //go:embed extraction leaves the reader - // at the end of a line. - startLine := !first - var c byte - for r.err == nil && !r.eof { - c = r.readByteNoBuf() - Reswitch: - switch c { - default: - startLine = false - - case '\n': - startLine = true - - case ' ', '\t': - // leave startLine alone - - case '"': - startLine = false - for r.err == nil { - if r.eof { - r.syntaxError() - } - c = r.readByteNoBuf() - if c == '\\' { - r.readByteNoBuf() - if r.err != nil { - r.syntaxError() - return false - } - continue - } - if c == '"' { - c = r.readByteNoBuf() - goto Reswitch - } - } - goto Reswitch - - case '`': - startLine = false - for r.err == nil { - if r.eof { - r.syntaxError() - } - c = r.readByteNoBuf() - if c == '`' { - c = r.readByteNoBuf() - goto Reswitch - } - } - - case '\'': - startLine = false - for r.err == nil { - if r.eof { - r.syntaxError() - } - c = r.readByteNoBuf() - if c == '\\' { - r.readByteNoBuf() - if r.err != nil { - r.syntaxError() - return false - } - continue - } - if c == '\'' { - c = r.readByteNoBuf() - goto Reswitch - } - } - - case '/': - c = r.readByteNoBuf() - switch c { - default: - startLine = false - goto Reswitch - - case '*': - var c1 byte - for (c != '*' || c1 != '/') && r.err == nil { - if r.eof { - r.syntaxError() - } - c, c1 = c1, r.readByteNoBuf() - } - startLine = false - - case '/': - if startLine { - // Try to read this as a //go:embed comment. - for i := range goEmbed { - c = r.readByteNoBuf() - if c != goEmbed[i] { - goto SkipSlashSlash - } - } - c = r.readByteNoBuf() - if c == ' ' || c == '\t' { - // Found one! - return true - } - } - SkipSlashSlash: - for c != '\n' && r.err == nil && !r.eof { - c = r.readByteNoBuf() - } - startLine = true - } - } - } - return false -} - // readKeyword reads the given keyword from the input. // If the keyword is not present, readKeyword records a syntax error. func (r *importReader) readKeyword(kw string) { @@ -436,9 +299,7 @@ func readGoInfo(f io.Reader, info *fileInfo) error { // we are sure we don't change the errors that go/parser returns. if r.err == errSyntax { r.err = nil - for r.err == nil && !r.eof { - r.readByte() - } + r.readRest() info.header = r.buf } if r.err != nil { @@ -511,23 +372,23 @@ func readGoInfo(f io.Reader, info *fileInfo) error { // (near the package statement or imports), the compiler // will reject them. They can be (and have already been) ignored. 
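
Note: the //go:embed scan now reuses go/scanner (below) instead of the hand-rolled findEmbed state machine deleted above, so string, rune, and comment syntax is handled by one shared implementation. The shape of the new loop, runnable on its own:

    package main

    import (
        "fmt"
        "go/scanner"
        "go/token"
        "strings"
    )

    func main() {
        src := []byte("package p\n\nimport \"embed\"\n\n//go:embed *.txt\nvar content embed.FS\n")
        fset := token.NewFileSet()
        file := fset.AddFile("p.go", -1, len(src))
        var sc scanner.Scanner
        sc.Init(file, src, nil, scanner.ScanComments)
        for {
            pos, tok, lit := sc.Scan()
            if tok == token.EOF {
                break
            }
            if tok == token.COMMENT && strings.HasPrefix(lit, "//go:embed") {
                fmt.Println(fset.Position(pos), lit) // p.go:5:1 //go:embed *.txt
            }
        }
    }
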
if hasEmbed { - var line []byte - for first := true; r.findEmbed(first); first = false { - line = line[:0] - pos := r.pos - for { - c := r.readByteNoBuf() - if c == '\n' || r.err != nil || r.eof { - break - } - line = append(line, c) + r.readRest() + fset := token.NewFileSet() + file := fset.AddFile(r.pos.Filename, -1, len(r.buf)) + var sc scanner.Scanner + sc.Init(file, r.buf, nil, scanner.ScanComments) + for { + pos, tok, lit := sc.Scan() + if tok == token.EOF { + break } - // Add args if line is well-formed. - // Ignore badly-formed lines - the compiler will report them when it finds them, - // and we can pretend they are not there to help go list succeed with what it knows. - embs, err := parseGoEmbed(string(line), pos) - if err == nil { - info.embeds = append(info.embeds, embs...) + if tok == token.COMMENT && strings.HasPrefix(lit, "//go:embed") { + // Ignore badly-formed lines - the compiler will report them when it finds them, + // and we can pretend they are not there to help go list succeed with what it knows. + embs, err := parseGoEmbed(fset, pos, lit) + if err == nil { + info.embeds = append(info.embeds, embs...) + } } } } @@ -549,75 +410,21 @@ func isValidImport(s string) bool { return s != "" } -// parseGoEmbed parses the text following "//go:embed" to extract the glob patterns. +// parseGoEmbed parses a "//go:embed" to extract the glob patterns. // It accepts unquoted space-separated patterns as well as double-quoted and back-quoted Go strings. -// This is based on a similar function in cmd/compile/internal/gc/noder.go; -// this version calculates position information as well. -func parseGoEmbed(args string, pos token.Position) ([]fileEmbed, error) { - trimBytes := func(n int) { - pos.Offset += n - pos.Column += utf8.RuneCountInString(args[:n]) - args = args[n:] +// This must match the behavior of cmd/compile/internal/noder.go. 
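
Note: parseGoEmbed (below) is now a thin wrapper over the go/ast directive API added earlier in this patch. Used directly, passing pos 0 so that the reported positions are plain byte offsets into the comment text:

    package main

    import (
        "fmt"
        "go/ast"
    )

    func main() {
        dir, ok := ast.ParseDirective(0, `//go:embed "a b.txt" c.txt`)
        if !ok || dir.Tool != "go" || dir.Name != "embed" {
            return
        }
        args, err := dir.ParseArgs()
        if err != nil {
            panic(err)
        }
        for _, a := range args {
            fmt.Printf("%q at offset %d\n", a.Arg, a.Pos)
        }
        // Output:
        // "a b.txt" at offset 11
        // "c.txt" at offset 21
    }
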
+func parseGoEmbed(fset *token.FileSet, pos token.Pos, comment string) ([]fileEmbed, error) { + dir, ok := ast.ParseDirective(pos, comment) + if !ok || dir.Tool != "go" || dir.Name != "embed" { + return nil, nil } - trimSpace := func() { - trim := strings.TrimLeftFunc(args, unicode.IsSpace) - trimBytes(len(args) - len(trim)) + args, err := dir.ParseArgs() + if err != nil { + return nil, err } - var list []fileEmbed - for trimSpace(); args != ""; trimSpace() { - var path string - pathPos := pos - Switch: - switch args[0] { - default: - i := len(args) - for j, c := range args { - if unicode.IsSpace(c) { - i = j - break - } - } - path = args[:i] - trimBytes(i) - - case '`': - var ok bool - path, _, ok = strings.Cut(args[1:], "`") - if !ok { - return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args) - } - trimBytes(1 + len(path) + 1) - - case '"': - i := 1 - for ; i < len(args); i++ { - if args[i] == '\\' { - i++ - continue - } - if args[i] == '"' { - q, err := strconv.Unquote(args[:i+1]) - if err != nil { - return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args[:i+1]) - } - path = q - trimBytes(i + 1) - break Switch - } - } - if i >= len(args) { - return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args) - } - } - - if args != "" { - r, _ := utf8.DecodeRuneInString(args) - if !unicode.IsSpace(r) { - return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args) - } - } - list = append(list, fileEmbed{path, pathPos}) + for _, arg := range args { + list = append(list, fileEmbed{arg.Arg, fset.Position(arg.Pos)}) } return list, nil } diff --git a/src/go/printer/printer_test.go b/src/go/printer/printer_test.go index 2a9c8be3003..604b8e5aea3 100644 --- a/src/go/printer/printer_test.go +++ b/src/go/printer/printer_test.go @@ -548,7 +548,6 @@ func TestBaseIndent(t *testing.T) { } for indent := 0; indent < 4; indent++ { - indent := indent t.Run(fmt.Sprint(indent), func(t *testing.T) { t.Parallel() var buf bytes.Buffer diff --git a/src/go/scanner/scanner.go b/src/go/scanner/scanner.go index 153252b5cc3..cdbeb6323c6 100644 --- a/src/go/scanner/scanner.go +++ b/src/go/scanner/scanner.go @@ -107,7 +107,7 @@ func (s *Scanner) peek() byte { return 0 } -// A mode value is a set of flags (or 0). +// A Mode value is a set of flags (or 0). // They control scanner behavior. type Mode uint diff --git a/src/go/token/position.go b/src/go/token/position.go index c3816b1672c..39756f257d3 100644 --- a/src/go/token/position.go +++ b/src/go/token/position.go @@ -511,7 +511,7 @@ func (s *FileSet) AddExistingFiles(files ...*File) { // } // // because all calls to AddFile must be in increasing order. - // AddExistingFilesFiles lets us augment an existing FileSet + // AddExistingFiles lets us augment an existing FileSet // sequentially, so long as all sets of files have disjoint ranges. // This approach also does not preserve line directives. diff --git a/src/go/types/alias.go b/src/go/types/alias.go index f15ff570303..a2eeb3afac8 100644 --- a/src/go/types/alias.go +++ b/src/go/types/alias.go @@ -116,7 +116,6 @@ func unalias(a0 *Alias) Type { for a := a0; a != nil; a, _ = t.(*Alias) { t = a.fromRHS } - // It's fine to memoize nil types since it's the zero value for actual. // It accomplishes nothing. 
a0.actual = t diff --git a/src/go/types/api_test.go b/src/go/types/api_test.go index f7a98ae2806..f31b9d30c58 100644 --- a/src/go/types/api_test.go +++ b/src/go/types/api_test.go @@ -2432,7 +2432,6 @@ type K = Nested[string] ) var wg sync.WaitGroup for i := 0; i < 2; i++ { - i := i wg.Add(1) go func() { defer wg.Done() @@ -2480,8 +2479,8 @@ func TestInstantiateErrors(t *testing.T) { t.Fatalf("Instantiate(%v, %v) returned nil error, want non-nil", T, test.targs) } - var argErr *ArgumentError - if !errors.As(err, &argErr) { + argErr, ok := errors.AsType[*ArgumentError](err) + if !ok { t.Fatalf("Instantiate(%v, %v): error is not an *ArgumentError", T, test.targs) } @@ -2496,8 +2495,8 @@ func TestArgumentErrorUnwrapping(t *testing.T) { Index: 1, Err: Error{Msg: "test"}, } - var e Error - if !errors.As(err, &e) { + e, ok := errors.AsType[Error](err) + if !ok { t.Fatalf("error %v does not wrap types.Error", err) } if e.Msg != "test" { @@ -2613,7 +2612,6 @@ func fn() { }) for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { if got := len(idents[test.name]); got != 1 { t.Fatalf("found %d identifiers named %s, want 1", got, test.name) diff --git a/src/go/types/assignments.go b/src/go/types/assignments.go index 3b40a9f8483..1524451af76 100644 --- a/src/go/types/assignments.go +++ b/src/go/types/assignments.go @@ -94,7 +94,7 @@ func (check *Checker) assignment(x *operand, T Type, context string) { // x.typ is typed // A generic (non-instantiated) function value cannot be assigned to a variable. - if sig, _ := under(x.typ).(*Signature); sig != nil && sig.TypeParams().Len() > 0 { + if sig, _ := x.typ.Underlying().(*Signature); sig != nil && sig.TypeParams().Len() > 0 { check.errorf(x, WrongTypeArgCount, "cannot use generic function %s without instantiation in %s", x, context) x.mode = invalid return @@ -264,7 +264,7 @@ func (check *Checker) assignVar(lhs, rhs ast.Expr, x *operand, context string) { var target *target // avoid calling ExprString if not needed if T != nil { - if _, ok := under(T).(*Signature); ok { + if _, ok := T.Underlying().(*Signature); ok { target = newTarget(T, ExprString(lhs)) } } diff --git a/src/go/types/builtins.go b/src/go/types/builtins.go index 9b03a40cbc3..90a3b4a901f 100644 --- a/src/go/types/builtins.go +++ b/src/go/types/builtins.go @@ -94,6 +94,17 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b // to type []byte with a second argument of string type followed by ... . // This form appends the bytes of the string." + // In either case, the first argument must be a slice; in particular it + // cannot be the predeclared nil value. Note that nil is not excluded by + // the assignability requirement alone for the special case (go.dev/issue/76220). + // spec: "If S is a type parameter, all types in its type set + // must have the same underlying slice type []E." + E, err := sliceElem(x) + if err != nil { + check.errorf(x, InvalidAppend, "invalid append: %s", err.format(check)) + return + } + // Handle append(bytes, y...) special case, where // the type set of y is {string} or {string, []byte}. var sig *Signature @@ -122,13 +133,6 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b // general case if sig == nil { - // spec: "If S is a type parameter, all types in its type set - // must have the same underlying slice type []E." 
- E, err := sliceElem(x) - if err != nil { - check.errorf(x, InvalidAppend, "invalid append: %s", err.format(check)) - return - } // check arguments by creating custom signature sig = makeSig(x.typ, x.typ, NewSlice(E)) // []E required for variadic signature sig.variadic = true @@ -147,7 +151,7 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b // len(x) mode := invalid var val constant.Value - switch t := arrayPtrDeref(under(x.typ)).(type) { + switch t := arrayPtrDeref(x.typ.Underlying()).(type) { case *Basic: if isString(t) && id == _Len { if x.mode == constant_ { @@ -206,7 +210,7 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b if mode == invalid { // avoid error if underlying type is invalid - if isValid(under(x.typ)) { + if isValid(x.typ.Underlying()) { code := InvalidCap if id == _Len { code = InvalidLen @@ -325,7 +329,7 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b // (applyTypeFunc never calls f with a type parameter) f := func(typ Type) Type { assert(!isTypeParam(typ)) - if t, _ := under(typ).(*Basic); t != nil { + if t, _ := typ.Underlying().(*Basic); t != nil { switch t.kind { case Float32: return Typ[Complex64] @@ -475,7 +479,7 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b // (applyTypeFunc never calls f with a type parameter) f := func(typ Type) Type { assert(!isTypeParam(typ)) - if t, _ := under(typ).(*Basic); t != nil { + if t, _ := typ.Underlying().(*Basic); t != nil { switch t.kind { case Complex64: return Typ[Float32] @@ -642,31 +646,31 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b // new(T) or new(expr) // (no argument evaluated yet) arg := argList[0] - check.exprOrType(x, arg, true) - var T Type + check.exprOrType(x, arg, false) + check.exclude(x, 1< want { @@ -211,7 +211,7 @@ func (check *Checker) callExpr(x *operand, call *ast.CallExpr) exprKind { check.errorf(call.Args[0], BadDotDotDotSyntax, "invalid use of ... in conversion to %s", T) break } - if t, _ := under(T).(*Interface); t != nil && !isTypeParam(T) { + if t, _ := T.Underlying().(*Interface); t != nil && !isTypeParam(T) { if !t.IsMethodSet() { check.errorf(call, MisplacedConstraintIface, "cannot use interface %s in conversion (contains specific type constraints or is comparable)", T) break @@ -815,7 +815,7 @@ func (check *Checker) selector(x *operand, e *ast.SelectorExpr, def *TypeName, w obj, index, indirect = lookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel, false) if obj == nil { // Don't report another error if the underlying type was invalid (go.dev/issue/49541). 
- if !isValid(under(x.typ)) { + if !isValid(x.typ.Underlying()) { goto Error } @@ -979,6 +979,7 @@ func (check *Checker) selector(x *operand, e *ast.SelectorExpr, def *TypeName, w Error: x.mode = invalid + x.typ = Typ[Invalid] x.expr = e } diff --git a/src/go/types/check.go b/src/go/types/check.go index c9753280bf8..44d3ae5586f 100644 --- a/src/go/types/check.go +++ b/src/go/types/check.go @@ -161,9 +161,10 @@ type Checker struct { fset *token.FileSet pkg *Package *Info - nextID uint64 // unique Id for type parameters (first valid Id is 1) - objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info - impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package + nextID uint64 // unique Id for type parameters (first valid Id is 1) + objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info + objList []Object // source-ordered keys of objMap + impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package // see TODO in validtype.go // valids instanceLookup // valid *Named (incl. instantiated) types per the validType check @@ -518,6 +519,12 @@ func (check *Checker) checkFiles(files []*ast.File) { print("== collectObjects ==") check.collectObjects() + print("== sortObjects ==") + check.sortObjects() + + print("== directCycles ==") + check.directCycles() + print("== packageObjects ==") check.packageObjects() diff --git a/src/go/types/const.go b/src/go/types/const.go index c1ed14abe2c..dc9efa0130e 100644 --- a/src/go/types/const.go +++ b/src/go/types/const.go @@ -35,7 +35,7 @@ func (check *Checker) overflow(x *operand, opPos token.Pos) { // x.typ cannot be a type parameter (type // parameters cannot be constant types). 
if isTyped(x.typ) { - check.representable(x, under(x.typ).(*Basic)) + check.representable(x, x.typ.Underlying().(*Basic)) return } diff --git a/src/go/types/conversions.go b/src/go/types/conversions.go index 6a9f263c57c..daef80adf80 100644 --- a/src/go/types/conversions.go +++ b/src/go/types/conversions.go @@ -21,7 +21,7 @@ func (check *Checker) conversion(x *operand, T Type) { constArg := x.mode == constant_ constConvertibleTo := func(T Type, val *constant.Value) bool { - switch t, _ := under(T).(*Basic); { + switch t, _ := T.Underlying().(*Basic); { case t == nil: // nothing to do case representableConst(x.val, check, t, val): @@ -145,8 +145,8 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool { origT := T V := Unalias(x.typ) T = Unalias(T) - Vu := under(V) - Tu := under(T) + Vu := V.Underlying() + Tu := T.Underlying() Vp, _ := V.(*TypeParam) Tp, _ := T.(*TypeParam) @@ -161,7 +161,7 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool { // and their pointer base types are not type parameters" if V, ok := V.(*Pointer); ok { if T, ok := T.(*Pointer); ok { - if IdenticalIgnoreTags(under(V.base), under(T.base)) && !isTypeParam(V.base) && !isTypeParam(T.base) { + if IdenticalIgnoreTags(V.base.Underlying(), T.base.Underlying()) && !isTypeParam(V.base) && !isTypeParam(T.base) { return true } } @@ -214,7 +214,7 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool { return false } case *Pointer: - if a, _ := under(a.Elem()).(*Array); a != nil { + if a, _ := a.Elem().Underlying().(*Array); a != nil { if Identical(s.Elem(), a.Elem()) { if check == nil || check.allowVersion(go1_17) { return true @@ -295,23 +295,23 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool { } func isUintptr(typ Type) bool { - t, _ := under(typ).(*Basic) + t, _ := typ.Underlying().(*Basic) return t != nil && t.kind == Uintptr } func isUnsafePointer(typ Type) bool { - t, _ := under(typ).(*Basic) + t, _ := typ.Underlying().(*Basic) return t != nil && t.kind == UnsafePointer } func isPointer(typ Type) bool { - _, ok := under(typ).(*Pointer) + _, ok := typ.Underlying().(*Pointer) return ok } func isBytesOrRunes(typ Type) bool { - if s, _ := under(typ).(*Slice); s != nil { - t, _ := under(s.elem).(*Basic) + if s, _ := typ.Underlying().(*Slice); s != nil { + t, _ := s.elem.Underlying().(*Basic) return t != nil && (t.kind == Byte || t.kind == Rune) } return false diff --git a/src/go/types/cycles.go b/src/go/types/cycles.go new file mode 100644 index 00000000000..87e8e9729b2 --- /dev/null +++ b/src/go/types/cycles.go @@ -0,0 +1,105 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types + +import "go/ast" + +// directCycles searches for direct cycles among package level type declarations. +// See directCycle for details. +func (check *Checker) directCycles() { + pathIdx := make(map[*TypeName]int) + for _, obj := range check.objList { + if tname, ok := obj.(*TypeName); ok { + check.directCycle(tname, pathIdx) + } + } +} + +// directCycle checks if the declaration of the type given by tname contains a direct cycle. 
+// A direct cycle exists if the path from tname's declaration's RHS leads from type name to +// type name and eventually ends up on that path again, via regular or alias declarations; +// in other words if there are no type literals (or basic types) on the path, and the path +// doesn't end in an undeclared object. +// If a cycle is detected, a cycle error is reported and the type at the start of the cycle +// is marked as invalid. +// +// The pathIdx map tracks which type names have been processed. An entry can be +// in 1 of 3 states as used in a typical 3-state (white/grey/black) graph marking +// algorithm for cycle detection: +// +// - entry not found: tname has not been seen before (white) +// - value is >= 0 : tname has been seen but is not done (grey); the value is the path index +// - value is < 0 : tname has been seen and is done (black) +// +// When directCycle returns, the pathIdx entries for all type names on the path +// that starts at tname are marked black, regardless of whether there was a cycle. +// This ensures that a type name is traversed only once. +func (check *Checker) directCycle(tname *TypeName, pathIdx map[*TypeName]int) { + if debug && check.conf._Trace { + check.trace(tname.Pos(), "-- check direct cycle for %s", tname) + } + + var path []*TypeName + for { + start, found := pathIdx[tname] + if start < 0 { + // tname is marked black - do not traverse it again. + // (start can only be < 0 if it was found in the first place) + break + } + + if found { + // tname is marked grey - we have a cycle on the path beginning at start. + // Mark tname as invalid. + tname.setType(Typ[Invalid]) + tname.setColor(black) + + // collect type names on cycle + var cycle []Object + for _, tname := range path[start:] { + cycle = append(cycle, tname) + } + + check.cycleError(cycle, firstInSrc(cycle)) + break + } + + // tname is marked white - mark it grey and add it to the path. + pathIdx[tname] = len(path) + path = append(path, tname) + + // For direct cycle detection, we don't care about whether we have an alias or not. + // If the associated type is not a name, we're at the end of the path and we're done. + rhs, ok := check.objMap[tname].tdecl.Type.(*ast.Ident) + if !ok { + break + } + + // Determine the RHS type. If it is not found in the package scope, we either + // have an error (which will be reported later), or the type exists elsewhere + // (universe scope, file scope via dot-import) and a cycle is not possible in + // the first place. If it is not a type name, we cannot have a direct cycle + // either. In all these cases we can stop. + tname1, ok := check.pkg.scope.Lookup(rhs.Name).(*TypeName) + if !ok { + break + } + + // Otherwise, continue with the RHS. + tname = tname1 + } + + // Mark all traversed type names black. 
+ // (ensure that pathIdx doesn't contain any grey entries upon returning) + for _, tname := range path { + pathIdx[tname] = -1 + } + + if debug { + for _, i := range pathIdx { + assert(i < 0) + } + } +} diff --git a/src/go/types/decl.go b/src/go/types/decl.go index 42423d291ce..2dab5cf7b94 100644 --- a/src/go/types/decl.go +++ b/src/go/types/decl.go @@ -226,8 +226,8 @@ func (check *Checker) validCycle(obj Object) (valid bool) { start := obj.color() - grey // index of obj in objPath cycle := check.objPath[start:] tparCycle := false // if set, the cycle is through a type parameter list - nval := 0 // number of (constant or variable) values in the cycle; valid if !generic - ndef := 0 // number of type definitions in the cycle; valid if !generic + nval := 0 // number of (constant or variable) values in the cycle + ndef := 0 // number of type definitions in the cycle loop: for _, obj := range cycle { switch obj := obj.(type) { @@ -236,7 +236,7 @@ loop: case *TypeName: // If we reach a generic type that is part of a cycle // and we are in a type parameter list, we have a cycle - // through a type parameter list, which is invalid. + // through a type parameter list. if check.inTParamList && isGeneric(obj.typ) { tparCycle = true break loop @@ -287,20 +287,23 @@ loop: }() } - if !tparCycle { - // A cycle involving only constants and variables is invalid but we - // ignore them here because they are reported via the initialization - // cycle check. - if nval == len(cycle) { - return true - } + // Cycles through type parameter lists are ok (go.dev/issue/68162). + if tparCycle { + return true + } - // A cycle involving only types (and possibly functions) must have at least - // one type definition to be permitted: If there is no type definition, we - // have a sequence of alias type names which will expand ad infinitum. - if nval == 0 && ndef > 0 { - return true - } + // A cycle involving only constants and variables is invalid but we + // ignore them here because they are reported via the initialization + // cycle check. + if nval == len(cycle) { + return true + } + + // A cycle involving only types (and possibly functions) must have at least + // one type definition to be permitted: If there is no type definition, we + // have a sequence of alias type names which will expand ad infinitum. + if nval == 0 && ndef > 0 { + return true } check.cycleError(cycle, firstInSrc(cycle)) @@ -463,7 +466,7 @@ func (check *Checker) constDecl(obj *Const, typ, init ast.Expr, inherited bool) if !isConstType(t) { // don't report an error if the type is an invalid C (defined) type // (go.dev/issue/22090) - if isValid(under(t)) { + if isValid(t.Underlying()) { check.errorf(typ, InvalidConstType, "invalid constant type %s", t) } obj.typ = Typ[Invalid] @@ -548,7 +551,7 @@ func (check *Checker) isImportedConstraint(typ Type) bool { if named == nil || named.obj.pkg == check.pkg || named.obj.pkg == nil { return false } - u, _ := named.under().(*Interface) + u, _ := named.Underlying().(*Interface) return u != nil && !u.IsMethodSet() } @@ -608,7 +611,7 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *ast.TypeSpec, def *TypeName assert(rhs != nil) alias.fromRHS = rhs - unalias(alias) // resolve alias.actual + unalias(alias) // populate alias.actual } else { // With Go1.23, the default behavior is to use Alias nodes, // reflected by check.enableAlias. Signal non-default behavior. 
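As an aside on behavior (not part of the patch itself): the effect of the new directCycles pass and the relaxed validCycle rule can be observed through the exported go/types API. In the sketch below, the hypothetical chain A -> B -> A is reported as a cycle because its declaration path never reaches a type literal, while the cycle through T's type parameter list is accepted per go.dev/issue/68162:

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
		"go/token"
		"go/types"
	)

	const src = `package p

	type A B // direct cycle: A -> B -> A, no type literal on the path
	type B A

	type S struct{ next *S } // fine: the path ends in a type literal

	type T[P interface{ m(T[P]) }] struct{} // accepted per go.dev/issue/68162
	`

	func main() {
		fset := token.NewFileSet()
		f, err := parser.ParseFile(fset, "p.go", src, 0)
		if err != nil {
			panic(err)
		}
		// Collect every type-checking error rather than stopping at the first.
		conf := types.Config{Error: func(err error) { fmt.Println(err) }}
		conf.Check("p", fset, []*ast.File{f}, nil)
	}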
@@ -637,31 +640,33 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *ast.TypeSpec, def *TypeName
 		named := check.newNamed(obj, nil, nil)
 		setDefType(def, named)
 
+		// The RHS of a named N can be nil if, for example, N is defined as a cycle of aliases with
+		// gotypesalias=0. Consider:
+		//
+		//	type D N // N.unpack() will panic
+		//	type N A
+		//	type A = N // N.fromRHS is not set before N.unpack(), since A does not call setDefType
+		//
+		// There is likely a better way to detect such cases, but it may not be worth the effort.
+		// Instead, we briefly permit a nil N.fromRHS while type-checking D.
+		named.allowNilRHS = true
+		defer func() { named.allowNilRHS = false }()
+
 		if tdecl.TypeParams != nil {
 			check.openScope(tdecl, "type parameters")
 			defer check.closeScope()
 			check.collectTypeParams(&named.tparams, tdecl.TypeParams)
 		}
 
-		// determine underlying type of named
 		rhs = check.definedType(tdecl.Type, obj)
 		assert(rhs != nil)
 		named.fromRHS = rhs
 
-		// If the underlying type was not set while type-checking the right-hand
-		// side, it is invalid and an error should have been reported elsewhere.
-		if named.underlying == nil {
-			named.underlying = Typ[Invalid]
-		}
-
-		// Disallow a lone type parameter as the RHS of a type declaration (go.dev/issue/45639).
-		// We don't need this restriction anymore if we make the underlying type of a type
-		// parameter its constraint interface: if the RHS is a lone type parameter, we will
-		// use its underlying type (like we do for any RHS in a type declaration), and its
-		// underlying type is an interface and the type declaration is well defined.
+		// spec: "In a type definition the given type cannot be a type parameter."
+		// (See also go.dev/issue/45639.)
 		if isTypeParam(rhs) {
 			check.error(tdecl.Type, MisplacedTypeParam, "cannot use a type parameter as RHS in type declaration")
-			named.underlying = Typ[Invalid]
+			named.fromRHS = Typ[Invalid]
 		}
 	}
 
@@ -814,7 +819,7 @@ func (check *Checker) collectMethods(obj *TypeName) {
 }
 
 func (check *Checker) checkFieldUniqueness(base *Named) {
-	if t, _ := base.under().(*Struct); t != nil {
+	if t, _ := base.Underlying().(*Struct); t != nil {
 		var mset objset
 
 		for i := 0; i < base.NumMethods(); i++ {
 			m := base.Method(i)
diff --git a/src/go/types/errors.go b/src/go/types/errors.go
index be1ec5d5f7e..fabcbe602a6 100644
--- a/src/go/types/errors.go
+++ b/src/go/types/errors.go
@@ -57,7 +57,7 @@ func (check *Checker) newError(code Code) *error_ {
 // Subsequent calls to addf provide additional information in the form of additional lines
 // in the error message (types2) or continuation errors identified by a tab-indented error
 // message (go/types).
-func (err *error_) addf(at positioner, format string, args ...interface{}) {
+func (err *error_) addf(at positioner, format string, args ...any) {
 	err.desc = append(err.desc, errorDesc{at, err.check.sprintf(format, args...)})
 }
 
diff --git a/src/go/types/expr.go b/src/go/types/expr.go
index 97d8c429978..8b3f764f192 100644
--- a/src/go/types/expr.go
+++ b/src/go/types/expr.go
@@ -336,7 +336,7 @@ func (check *Checker) updateExprType(x ast.Expr, typ Type, final bool) {
 	// If the new type is not final and still untyped, just
 	// update the recorded type.
if !final && isUntyped(typ) { - old.typ = under(typ).(*Basic) + old.typ = typ.Underlying().(*Basic) check.untyped[x] = old return } @@ -398,7 +398,7 @@ func (check *Checker) implicitTypeAndValue(x *operand, target Type) (Type, const return nil, nil, InvalidUntypedConversion } - switch u := under(target).(type) { + switch u := target.Underlying().(type) { case *Basic: if x.mode == constant_ { v, code := check.representation(x, u) @@ -605,7 +605,7 @@ Error: // incomparableCause returns a more specific cause why typ is not comparable. // If there is no more specific cause, the result is "". func (check *Checker) incomparableCause(typ Type) string { - switch under(typ).(type) { + switch typ.Underlying().(type) { case *Slice, *Signature, *Map: return compositeKind(typ) + " can only be compared to nil" } @@ -955,7 +955,7 @@ type target struct { // The result is nil if typ is not a signature. func newTarget(typ Type, desc string) *target { if typ != nil { - if sig, _ := under(typ).(*Signature); sig != nil { + if sig, _ := typ.Underlying().(*Signature); sig != nil { return &target{sig, desc} } } @@ -1101,7 +1101,7 @@ func (check *Checker) exprInternal(T *target, x *operand, e ast.Expr, hint Type) check.errorf(x, InvalidAssert, invalidOp+"cannot use type assertion on type parameter value %s", x) goto Error } - if _, ok := under(x.typ).(*Interface); !ok { + if _, ok := x.typ.Underlying().(*Interface); !ok { check.errorf(x, InvalidAssert, invalidOp+"%s is not an interface", x) goto Error } @@ -1198,7 +1198,7 @@ Error: // represented as an integer (such as 1.0) it is returned as an integer value. // This ensures that constants of different kind but equal value (such as // 1.0 + 0i, 1.0, 1) result in the same value. -func keyVal(x constant.Value) interface{} { +func keyVal(x constant.Value) any { switch x.Kind() { case constant.Complex: f := constant.ToFloat(x) diff --git a/src/go/types/gcsizes.go b/src/go/types/gcsizes.go index 227c53e1d28..adc350a6c2d 100644 --- a/src/go/types/gcsizes.go +++ b/src/go/types/gcsizes.go @@ -19,7 +19,7 @@ func (s *gcSizes) Alignof(T Type) (result int64) { // For arrays and structs, alignment is defined in terms // of alignment of the elements and fields, respectively. - switch t := under(T).(type) { + switch t := T.Underlying().(type) { case *Array: // spec: "For a variable x of array type: unsafe.Alignof(x) // is the same as unsafe.Alignof(x[0]), but at least 1." @@ -99,7 +99,7 @@ func (s *gcSizes) Offsetsof(fields []*Var) []int64 { } func (s *gcSizes) Sizeof(T Type) int64 { - switch t := under(T).(type) { + switch t := T.Underlying().(type) { case *Basic: assert(isTyped(T)) k := t.kind diff --git a/src/go/types/hilbert_test.go b/src/go/types/hilbert_test.go index afd7ee28bd2..8e807b36bfc 100644 --- a/src/go/types/hilbert_test.go +++ b/src/go/types/hilbert_test.go @@ -71,7 +71,7 @@ type gen struct { bytes.Buffer } -func (g *gen) p(format string, args ...interface{}) { +func (g *gen) p(format string, args ...any) { fmt.Fprintf(&g.Buffer, format, args...) 
} diff --git a/src/go/types/index.go b/src/go/types/index.go index 1d4f36dcf3c..42e47200131 100644 --- a/src/go/types/index.go +++ b/src/go/types/index.go @@ -36,7 +36,7 @@ func (check *Checker) indexExpr(x *operand, e *indexedExpr) (isFuncInst bool) { return false case value: - if sig, _ := under(x.typ).(*Signature); sig != nil && sig.TypeParams().Len() > 0 { + if sig, _ := x.typ.Underlying().(*Signature); sig != nil && sig.TypeParams().Len() > 0 { // function instantiation return true } @@ -51,7 +51,7 @@ func (check *Checker) indexExpr(x *operand, e *indexedExpr) (isFuncInst bool) { // ordinary index expression valid := false length := int64(-1) // valid if >= 0 - switch typ := under(x.typ).(type) { + switch typ := x.typ.Underlying().(type) { case *Basic: if isString(typ) { valid = true @@ -74,7 +74,7 @@ func (check *Checker) indexExpr(x *operand, e *indexedExpr) (isFuncInst bool) { x.typ = typ.elem case *Pointer: - if typ, _ := under(typ.base).(*Array); typ != nil { + if typ, _ := typ.base.Underlying().(*Array); typ != nil { valid = true length = typ.len x.mode = variable @@ -125,7 +125,7 @@ func (check *Checker) indexExpr(x *operand, e *indexedExpr) (isFuncInst bool) { mode = value } case *Pointer: - if t, _ := under(t.base).(*Array); t != nil { + if t, _ := t.base.Underlying().(*Array); t != nil { l = t.len e = t.elem } @@ -252,7 +252,7 @@ func (check *Checker) sliceExpr(x *operand, e *ast.SliceExpr) { // but don't go from untyped string to string. cu = Typ[String] if !isTypeParam(x.typ) { - cu = under(x.typ) // untyped string remains untyped + cu = x.typ.Underlying() // untyped string remains untyped } } @@ -297,7 +297,7 @@ func (check *Checker) sliceExpr(x *operand, e *ast.SliceExpr) { x.typ = &Slice{elem: u.elem} case *Pointer: - if u, _ := under(u.base).(*Array); u != nil { + if u, _ := u.base.Underlying().(*Array); u != nil { valid = true length = u.len x.typ = &Slice{elem: u.elem} diff --git a/src/go/types/infer.go b/src/go/types/infer.go index e955880674c..25a26b38a5e 100644 --- a/src/go/types/infer.go +++ b/src/go/types/infer.go @@ -430,7 +430,7 @@ func (check *Checker) infer(posn positioner, tparams []*TypeParam, targs []Type, // Note that if t0 was a signature, t1 must be a signature, and t1 // can only be a generic signature if it originated from a generic // function argument. Those signatures are never defined types and - // thus there is no need to call under below. + // thus there is no need to call Underlying below. // TODO(gri) Consider doing this in Checker.subst. // Then this would fall out automatically here and also // in instantiation (where we also explicitly nil out @@ -671,7 +671,7 @@ func coreTerm(tpar *TypeParam) (*term, bool) { if n == 1 { if debug { u, _ := commonUnder(tpar, nil) - assert(under(single.typ) == u) + assert(single.typ.Underlying() == u) } return single, true } diff --git a/src/go/types/instantiate.go b/src/go/types/instantiate.go index eef473447da..6488494cd83 100644 --- a/src/go/types/instantiate.go +++ b/src/go/types/instantiate.go @@ -86,7 +86,7 @@ func Instantiate(ctxt *Context, orig Type, targs []Type, validate bool) (Type, e // // For Named types the resulting instance may be unexpanded. // -// check may be nil (when not type-checking syntax); pos is used only only if check is non-nil. +// check may be nil (when not type-checking syntax); pos is used only if check is non-nil. 
func (check *Checker) instance(pos token.Pos, orig genericType, targs []Type, expanding *Named, ctxt *Context) (res Type) { // The order of the contexts below matters: we always prefer instances in the // expanding instance context in order to preserve reference cycles. @@ -229,12 +229,12 @@ func (check *Checker) verify(pos token.Pos, tparams []*TypeParam, targs []Type, // If the provided cause is non-nil, it may be set to an error string // explaining why V does not implement (or satisfy, for constraints) T. func (check *Checker) implements(V, T Type, constraint bool, cause *string) bool { - Vu := under(V) - Tu := under(T) + Vu := V.Underlying() + Tu := T.Underlying() if !isValid(Vu) || !isValid(Tu) { return true // avoid follow-on errors } - if p, _ := Vu.(*Pointer); p != nil && !isValid(under(p.base)) { + if p, _ := Vu.(*Pointer); p != nil && !isValid(p.base.Underlying()) { return true // avoid follow-on errors (see go.dev/issue/49541 for an example) } @@ -342,7 +342,7 @@ func (check *Checker) implements(V, T Type, constraint bool, cause *string) bool // If V ∉ t.typ but V ∈ ~t.typ then remember this type // so we can suggest it as an alternative in the error // message. - if alt == nil && !t.tilde && Identical(t.typ, under(t.typ)) { + if alt == nil && !t.tilde && Identical(t.typ, t.typ.Underlying()) { tt := *t tt.tilde = true if tt.includes(V) { diff --git a/src/go/types/lookup.go b/src/go/types/lookup.go index 16d63ae0f11..97debb7395e 100644 --- a/src/go/types/lookup.go +++ b/src/go/types/lookup.go @@ -148,14 +148,14 @@ func lookupFieldOrMethodImpl(T Type, addressable bool, pkg *Package, name string return // blank fields/methods are never found } - // Importantly, we must not call under before the call to deref below (nor - // does deref call under), as doing so could incorrectly result in finding + // Importantly, we must not call Underlying before the call to deref below (nor + // does deref call Underlying), as doing so could incorrectly result in finding // methods of the pointer base type when T is a (*Named) pointer type. typ, isPtr := deref(T) // *typ where typ is an interface (incl. a type parameter) has no methods. if isPtr { - if _, ok := under(typ).(*Interface); ok { + if _, ok := typ.Underlying().(*Interface); ok { return } } @@ -205,7 +205,7 @@ func lookupFieldOrMethodImpl(T Type, addressable bool, pkg *Package, name string } } - switch t := under(typ).(type) { + switch t := typ.Underlying().(type) { case *Struct: // look for a matching field and collect embedded types for i, f := range t.fields { @@ -376,7 +376,7 @@ func MissingMethod(V Type, T *Interface, static bool) (method *Func, wrongType b // The comparator is used to compare signatures. // If a method is missing and cause is not nil, *cause describes the error. 
func (check *Checker) missingMethod(V, T Type, static bool, equivalent func(x, y Type) bool, cause *string) (method *Func, wrongType bool) { - methods := under(T).(*Interface).typeSet().methods // T must be an interface + methods := T.Underlying().(*Interface).typeSet().methods // T must be an interface if len(methods) == 0 { return nil, false } @@ -396,7 +396,7 @@ func (check *Checker) missingMethod(V, T Type, static bool, equivalent func(x, y var m *Func // method on T we're trying to implement var f *Func // method on V, if found (state is one of ok, wrongName, wrongSig) - if u, _ := under(V).(*Interface); u != nil { + if u, _ := V.Underlying().(*Interface); u != nil { tset := u.typeSet() for _, m = range methods { _, f = tset.LookupMethod(m.pkg, m.name, false) @@ -537,7 +537,7 @@ func (check *Checker) hasAllMethods(V, T Type, static bool, equivalent func(x, y // hasInvalidEmbeddedFields reports whether T is a struct (or a pointer to a struct) that contains // (directly or indirectly) embedded fields with invalid types. func hasInvalidEmbeddedFields(T Type, seen map[*Struct]bool) bool { - if S, _ := under(derefStructPtr(T)).(*Struct); S != nil && !seen[S] { + if S, _ := derefStructPtr(T).Underlying().(*Struct); S != nil && !seen[S] { if seen == nil { seen = make(map[*Struct]bool) } @@ -552,14 +552,14 @@ func hasInvalidEmbeddedFields(T Type, seen map[*Struct]bool) bool { } func isInterfacePtr(T Type) bool { - p, _ := under(T).(*Pointer) + p, _ := T.Underlying().(*Pointer) return p != nil && IsInterface(p.base) } // check may be nil. func (check *Checker) interfacePtrError(T Type) string { assert(isInterfacePtr(T)) - if p, _ := under(T).(*Pointer); isTypeParam(p.base) { + if p, _ := T.Underlying().(*Pointer); isTypeParam(p.base) { return check.sprintf("type %s is pointer to type parameter, not type parameter", T) } return check.sprintf("type %s is pointer to interface, not interface", T) @@ -632,8 +632,8 @@ func deref(typ Type) (Type, bool) { // derefStructPtr dereferences typ if it is a (named or unnamed) pointer to a // (named or unnamed) struct and returns its base. Otherwise it returns typ. func derefStructPtr(typ Type) Type { - if p, _ := under(typ).(*Pointer); p != nil { - if _, ok := under(p.base).(*Struct); ok { + if p, _ := typ.Underlying().(*Pointer); p != nil { + if _, ok := p.base.Underlying().(*Struct); ok { return p.base } } diff --git a/src/go/types/methodset.go b/src/go/types/methodset.go index ac8f0bdd288..2eac62e6028 100644 --- a/src/go/types/methodset.go +++ b/src/go/types/methodset.go @@ -133,7 +133,7 @@ func NewMethodSet(T Type) *MethodSet { } } - switch t := under(typ).(type) { + switch t := typ.Underlying().(type) { case *Struct: for i, f := range t.fields { if fset == nil { diff --git a/src/go/types/named.go b/src/go/types/named.go index b2f99ffccb9..b106d7a8eb7 100644 --- a/src/go/types/named.go +++ b/src/go/types/named.go @@ -36,7 +36,7 @@ import ( // In cases 1, 3, and 4, it is possible that the underlying type or methods of // N may not be immediately available. // - During type-checking, we allocate N before type-checking its underlying -// type or methods, so that we may resolve recursive references. +// type or methods, so that we can create recursive references. // - When loading from export data, we may load its methods and underlying // type lazily using a provided load function. // - After instantiating, we lazily expand the underlying type and methods @@ -50,10 +50,8 @@ import ( // soon. 
// // We achieve this by tracking state with an atomic state variable, and -// guarding potentially concurrent calculations with a mutex. At any point in -// time this state variable determines which data on N may be accessed. As -// state monotonically progresses, any data available at state M may be -// accessed without acquiring the mutex at state N, provided N >= M. +// guarding potentially concurrent calculations with a mutex. See [stateMask] +// for details. // // GLOSSARY: Here are a few terms used in this file to describe Named types: // - We say that a Named type is "instantiated" if it has been constructed by @@ -62,18 +60,19 @@ import ( // declaration in the source. Instantiated named types correspond to a type // instantiation in the source, not a declaration. But their Origin type is // a declared type. -// - We say that a Named type is "resolved" if its RHS information has been -// loaded or fully type-checked. For Named types constructed from export -// data, this may involve invoking a loader function to extract information -// from export data. For instantiated named types this involves reading -// information from their origin. +// - We say that a Named type is "unpacked" if its RHS information has been +// populated, normalizing its representation for use in type-checking +// operations and abstracting away how it was created: +// - For a Named type constructed from unified IR, this involves invoking +// a lazy loader function to extract details from UIR as needed. +// - For an instantiated Named type, this involves extracting information +// from its origin and substituting type arguments into a "synthetic" +// RHS; this process is called "expanding" the RHS (see below). // - We say that a Named type is "expanded" if it is an instantiated type and -// type parameters in its underlying type and methods have been substituted -// with the type arguments from the instantiation. A type may be partially -// expanded if some but not all of these details have been substituted. -// Similarly, we refer to these individual details (underlying type or -// method) as being "expanded". -// - When all information is known for a named type, we say it is "complete". +// type parameters in its RHS and methods have been substituted with the type +// arguments from the instantiation. A type may be partially expanded if some +// but not all of these details have been substituted. Similarly, we refer to +// these individual details (RHS or method) as being "expanded". // // Some invariants to keep in mind: each declared Named type has a single // corresponding object, and that object's type is the (possibly generic) Named @@ -90,8 +89,8 @@ import ( // presence of a cycle of named types, expansion will eventually find an // existing instance in the Context and short-circuit the expansion. // -// Once an instance is complete, we can nil out this shared Context to unpin -// memory, though this Context may still be held by other incomplete instances +// Once an instance is fully expanded, we can nil out this shared Context to unpin +// memory, though the Context may still be held by other incomplete instances // in its "lineage". // A Named represents a named (defined) type. @@ -110,18 +109,17 @@ type Named struct { check *Checker // non-nil during type-checking; nil otherwise obj *TypeName // corresponding declared object for declared types; see above for instantiated types - // fromRHS holds the type (on RHS of declaration) this *Named type is derived - // from (for cycle reporting). 
Only used by validType, and therefore does not
-	// require synchronization.
-	fromRHS Type
+	// flags indicating temporary violations of the invariants for fromRHS and underlying
+	allowNilRHS        bool // same as below, as well as briefly during checking of a type declaration
+	allowNilUnderlying bool // may be true from creation via [NewNamed] until [Named.SetUnderlying]
 
-	// information for instantiated types; nil otherwise
-	inst *instance
+	inst *instance // information for instantiated types; nil otherwise
 
 	mu         sync.Mutex // guards all fields below
-	state_     uint32     // the current state of this type; must only be accessed atomically
-	underlying Type       // possibly a *Named during setup; never a *Named once set up completely
+	state_     uint32     // the current state of this type; must only be accessed atomically or when mu is held
+	fromRHS    Type       // the declaration RHS this type is derived from
 	tparams    *TypeParamList // type parameters, or nil
+	underlying Type       // underlying type, or nil
 
 	// methods declared for this type (not the method set of this type)
 	// Signatures are type-checked lazily.
@@ -143,15 +141,43 @@ type instance struct {
 	ctxt *Context // local Context; set to nil after full expansion
 }
 
-// namedState represents the possible states that a named type may assume.
-type namedState uint32
+// stateMask represents each state in the lifecycle of a named type.
+//
+// Each named type begins in the initial state. A named type may transition to a new state
+// according to the below diagram:
+//
+//	initial
+//	lazyLoaded
+//	unpacked
+//	├── hasMethods
+//	└── hasUnder
+//
+// That is, descent down the tree is mostly linear (initial through unpacked), except upon
+// reaching the leaves (hasMethods and hasUnder). A type may occupy any combination of the
+// leaf states at once (they are independent states).
+//
+// To represent this independence, the set of active states is represented with a bit set. State
+// transitions are monotonic. Once a state bit is set, it remains set.
+//
+// The above constraints significantly narrow the possible bit sets for a named type. With bits
+// set left-to-right, they are:
+//
+//	0000 | initial
+//	1000 | lazyLoaded
+//	1100 | unpacked, which implies lazyLoaded
+//	1110 | hasMethods, which implies unpacked (which in turn implies lazyLoaded)
+//	1101 | hasUnder, which implies unpacked ...
+//	1111 | both hasMethods and hasUnder, which implies unpacked ...
+//
+// To read the state of a named type, use [Named.stateHas]; to write, use [Named.setState].
+type stateMask uint32
 
-// Note: the order of states is relevant
 const (
-	unresolved namedState = iota // tparams, underlying type and methods might be unavailable
-	resolved                     // resolve has run; methods might be unexpanded (for instances)
-	loaded                       // loader has run; constraints might be unexpanded (for generic types)
-	complete                     // all data is known
+	// initially, type parameters, RHS, underlying, and methods might be unavailable
+	lazyLoaded stateMask = 1 << iota // methods are available, but constraints might be unexpanded (for generic types)
+	unpacked                         // methods might be unexpanded (for instances)
+	hasMethods                       // methods are all expanded (for instances)
+	hasUnder                         // underlying type is available
 )
 
 // NewNamed returns a new named type for the given type name, underlying type, and associated methods.
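The stateMask scheme is compact enough to exercise in isolation. The toy below is a sketch, not the patched implementation; it borrows the constant names from the diff and shows the two accessor idioms the new code relies on: lock-free reads via atomic.LoadUint32 and monotonic writes via atomic.OrUint32 (which requires Go 1.23 or newer). Note that a query over several bits answers whether any of them is set, matching the stateHas semantics defined further down:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	type stateMask uint32

	const (
		lazyLoaded stateMask = 1 << iota
		unpacked
		hasMethods
		hasUnder
	)

	type state struct{ bits uint32 }

	// has reports whether any bit of m is set; safe for concurrent readers.
	func (s *state) has(m stateMask) bool {
		return stateMask(atomic.LoadUint32(&s.bits))&m != 0
	}

	// set turns on every bit of m; bits are added and never cleared, so the
	// observed state progresses monotonically.
	func (s *state) set(m stateMask) {
		atomic.OrUint32(&s.bits, uint32(m))
	}

	func main() {
		var s state
		s.set(lazyLoaded | unpacked)
		fmt.Println(s.has(hasUnder))              // false
		s.set(hasUnder)
		fmt.Println(s.has(hasMethods | hasUnder)) // true: hasUnder is set
	}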
@@ -161,31 +187,38 @@ func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
 	if asNamed(underlying) != nil {
 		panic("underlying type must not be *Named")
 	}
-	return (*Checker)(nil).newNamed(obj, underlying, methods)
+	n := (*Checker)(nil).newNamed(obj, underlying, methods)
+	if underlying == nil {
+		n.allowNilRHS = true
+		n.allowNilUnderlying = true
+	} else {
+		n.SetUnderlying(underlying)
+	}
+	return n
 }
 
-// resolve resolves the type parameters, methods, and underlying type of n.
+// unpack populates the type parameters, methods, and RHS of n.
 //
-// For the purposes of resolution, there are three categories of named types:
-//  1. Instantiated Types
-//  2. Lazy Loaded Types
-//  3. All Others
+// For the purposes of unpacking, there are three categories of named types:
+//  1. Lazy loaded types
+//  2. Instantiated types
+//  3. All others
 //
 // Note that the above form a partition.
 //
-// Instantiated types:
-//	Type parameters, methods, and underlying type of n become accessible,
-//	though methods are lazily populated as needed.
-//
 // Lazy loaded types:
-//	Type parameters, methods, and underlying type of n become accessible
-//	and are fully expanded.
+//	Type parameters, methods, and RHS of n become accessible and are fully
+//	expanded.
+//
+// Instantiated types:
+//	Type parameters, methods, and RHS of n become accessible, though methods
+//	are lazily populated as needed.
 //
 // All others:
-//	Effectively, nothing happens. The underlying type of n may still be
-//	a named type.
-func (n *Named) resolve() *Named {
-	if n.state() > unresolved { // avoid locking below
+//	Effectively, nothing happens.
+func (n *Named) unpack() *Named {
+	if n.stateHas(lazyLoaded | unpacked) { // avoid locking below
 		return n
 	}
 
@@ -194,27 +227,29 @@ func (n *Named) resolve() *Named {
 	n.mu.Lock()
 	defer n.mu.Unlock()
 
-	if n.state() > unresolved {
+	// only atomic for consistency; we are holding the mutex
+	if n.stateHas(lazyLoaded | unpacked) {
 		return n
 	}
 
+	// underlying is resolved only after unpacking; it must not be set yet
+	defer func() { assert(!n.stateHas(hasUnder)) }()
+
 	if n.inst != nil {
-		assert(n.underlying == nil) // n is an unresolved instance
-		assert(n.loader == nil)     // instances are created by instantiation, in which case n.loader is nil
+		assert(n.fromRHS == nil) // instantiated types are not declared types
+		assert(n.loader == nil)  // cannot import an instantiation
 
 		orig := n.inst.orig
-		orig.resolve()
-		underlying := n.expandUnderlying()
+		orig.unpack()
+		n.fromRHS = n.expandRHS()
 
 		n.tparams = orig.tparams
-		n.underlying = underlying
-		n.fromRHS = orig.fromRHS // for cycle detection
 
 		if len(orig.methods) == 0 {
-			n.setState(complete) // nothing further to do
+			n.setState(lazyLoaded | unpacked | hasMethods) // nothing further to do
 			n.inst.ctxt = nil
 		} else {
-			n.setState(resolved)
+			n.setState(lazyLoaded | unpacked)
 		}
 
 		return n
 	}
 
@@ -227,43 +262,57 @@ func (n *Named) resolve() *Named {
 	// methods would need to support reentrant calls though. It would
 	// also make the API more future-proof towards further extensions.
 	if n.loader != nil {
-		assert(n.underlying == nil)
-		assert(n.TypeArgs().Len() == 0) // instances are created by instantiation, in which case n.loader is nil
+		assert(n.fromRHS == nil) // not loaded yet
+		assert(n.inst == nil)    // cannot import an instantiation
 
 		tparams, underlying, methods, delayed := n.loader(n)
 		n.loader = nil
 
 		n.tparams = bindTParams(tparams)
-		n.underlying = underlying
 		n.fromRHS = underlying // for cycle detection
 		n.methods = methods
 
-		// advance state to avoid deadlock calling delayed functions
-		n.setState(loaded)
-
+		n.setState(lazyLoaded) // avoid deadlock calling delayed functions
 		for _, f := range delayed {
 			f()
 		}
 	}
 
-	n.setState(complete)
+	n.setState(lazyLoaded | unpacked | hasMethods)
 	return n
 }
 
-// state atomically accesses the current state of the receiver.
-func (n *Named) state() namedState {
-	return namedState(atomic.LoadUint32(&n.state_))
+// stateHas atomically reports whether the current state includes any active bit in m.
+func (n *Named) stateHas(m stateMask) bool {
+	return stateMask(atomic.LoadUint32(&n.state_))&m != 0
 }
 
-// setState atomically stores the given state for n.
+// setState atomically sets the current state to include each active bit in m.
 // Must only be called while holding n.mu.
-func (n *Named) setState(state namedState) {
-	atomic.StoreUint32(&n.state_, uint32(state))
+func (n *Named) setState(m stateMask) {
+	atomic.OrUint32(&n.state_, uint32(m))
+	// verify state transitions
+	if debug {
+		m := stateMask(atomic.LoadUint32(&n.state_))
+		u := m&unpacked != 0
+		// unpacked => lazyLoaded
+		if u {
+			assert(m&lazyLoaded != 0)
+		}
+		// hasMethods => unpacked
+		if m&hasMethods != 0 {
+			assert(u)
+		}
+		// hasUnder => unpacked
+		if m&hasUnder != 0 {
+			assert(u)
+		}
+	}
 }
 
 // newNamed is like NewNamed but with a *Checker receiver.
-func (check *Checker) newNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
-	typ := &Named{check: check, obj: obj, fromRHS: underlying, underlying: underlying, methods: methods}
+func (check *Checker) newNamed(obj *TypeName, fromRHS Type, methods []*Func) *Named {
+	typ := &Named{check: check, obj: obj, fromRHS: fromRHS, methods: methods}
 	if obj.typ == nil {
 		obj.typ = typ
 	}
@@ -303,25 +352,13 @@ func (check *Checker) newNamedInstance(pos token.Pos, orig *Named, targs []Type,
 	return typ
 }
 
-func (t *Named) cleanup() {
-	assert(t.inst == nil || t.inst.orig.inst == nil)
-	// Ensure that every defined type created in the course of type-checking has
-	// either non-*Named underlying type, or is unexpanded.
-	//
-	// This guarantees that we don't leak any types whose underlying type is
-	// *Named, because any unexpanded instances will lazily compute their
-	// underlying type by substituting in the underlying type of their origin.
-	// The origin must have either been imported or type-checked and expanded
-	// here, and in either case its underlying type will be fully expanded.
-	switch t.underlying.(type) {
-	case nil:
-		if t.TypeArgs().Len() == 0 {
-			panic("nil underlying")
-		}
-	case *Named, *Alias:
-		t.under() // t.under may add entries to check.cleaners
+func (n *Named) cleanup() {
+	// Instances can have a nil underlying at the end of type checking; they
+	// will lazily expand it as needed. All other types must have one.
+	if n.inst == nil {
+		n.Underlying()
 	}
-	t.check = nil
+	n.check = nil
 }
 
 // Obj returns the type name for the declaration defining the named type t. For
@@ -344,13 +381,13 @@ func (t *Named) Origin() *Named {
 
 // TypeParams returns the type parameters of the named type t, or nil.
// The result is non-nil for an (originally) generic type even if it is instantiated. -func (t *Named) TypeParams() *TypeParamList { return t.resolve().tparams } +func (t *Named) TypeParams() *TypeParamList { return t.unpack().tparams } // SetTypeParams sets the type parameters of the named type t. // t must not have type arguments. func (t *Named) SetTypeParams(tparams []*TypeParam) { assert(t.inst == nil) - t.resolve().tparams = bindTParams(tparams) + t.unpack().tparams = bindTParams(tparams) } // TypeArgs returns the type arguments used to instantiate the named type t. @@ -363,7 +400,7 @@ func (t *Named) TypeArgs() *TypeList { // NumMethods returns the number of explicit methods defined for t. func (t *Named) NumMethods() int { - return len(t.Origin().resolve().methods) + return len(t.Origin().unpack().methods) } // Method returns the i'th method of named type t for 0 <= i < t.NumMethods(). @@ -378,13 +415,13 @@ func (t *Named) NumMethods() int { // But the specific ordering is not specified and must not be relied on as it may // change in the future. func (t *Named) Method(i int) *Func { - t.resolve() + t.unpack() - if t.state() >= complete { + if t.stateHas(hasMethods) { return t.methods[i] } - assert(t.inst != nil) // only instances should have incomplete methods + assert(t.inst != nil) // only instances should have unexpanded methods orig := t.inst.orig t.mu.Lock() @@ -401,9 +438,9 @@ func (t *Named) Method(i int) *Func { t.inst.expandedMethods++ // Check if we've created all methods at this point. If we have, mark the - // type as fully expanded. + // type as having all of its methods. if t.inst.expandedMethods == len(orig.methods) { - t.setState(complete) + t.setState(hasMethods) t.inst.ctxt = nil // no need for a context anymore } } @@ -472,18 +509,25 @@ func (t *Named) expandMethod(i int) *Func { // SetUnderlying sets the underlying type and marks t as complete. // t must not have type arguments. -func (t *Named) SetUnderlying(underlying Type) { +func (t *Named) SetUnderlying(u Type) { assert(t.inst == nil) - if underlying == nil { + if u == nil { panic("underlying type must not be nil") } - if asNamed(underlying) != nil { + if asNamed(u) != nil { panic("underlying type must not be *Named") } - t.resolve().underlying = underlying - if t.fromRHS == nil { - t.fromRHS = underlying // for cycle detection - } + // be careful to uphold the state invariants + t.mu.Lock() + defer t.mu.Unlock() + + t.fromRHS = u + t.allowNilRHS = false + t.setState(lazyLoaded | unpacked | hasMethods) // TODO(markfreeman): Why hasMethods? + + t.underlying = u + t.allowNilUnderlying = false + t.setState(hasUnder) } // AddMethod adds method m unless it is already in the method list. @@ -492,7 +536,7 @@ func (t *Named) SetUnderlying(underlying Type) { func (t *Named) AddMethod(m *Func) { assert(samePkg(t.obj.pkg, m.pkg)) assert(t.inst == nil) - t.resolve() + t.unpack() if t.methodIndex(m.name, false) < 0 { t.methods = append(t.methods, m) } @@ -521,14 +565,39 @@ func (t *Named) methodIndex(name string, foldCase bool) int { return -1 } +// rhs returns [Named.fromRHS]. +// +// In debug mode, it also asserts that n is in an appropriate state. +func (n *Named) rhs() Type { + if debug { + assert(n.stateHas(lazyLoaded | unpacked)) + } + return n.fromRHS +} + // Underlying returns the [underlying type] of the named type t, resolving all // forwarding declarations. Underlying types are never Named, TypeParam, or // Alias types. // // [underlying type]: https://go.dev/ref/spec#Underlying_types. 
-func (t *Named) Underlying() Type { - // TODO(gri) Investigate if Unalias can be moved to where underlying is set. - return Unalias(t.resolve().underlying) +func (n *Named) Underlying() Type { + n.unpack() + + // The gccimporter depends on writing a nil underlying via NewNamed and + // immediately reading it back. Rather than putting that in Named.under + // and complicating things there, we just check for that special case here. + if n.rhs() == nil { + assert(n.allowNilRHS) + if n.allowNilUnderlying { + return nil + } + } + + if !n.stateHas(hasUnder) { // minor performance optimization + n.resolveUnderlying() + } + + return n.underlying } func (t *Named) String() string { return TypeString(t, nil) } @@ -539,96 +608,78 @@ func (t *Named) String() string { return TypeString(t, nil) } // TODO(rfindley): reorganize the loading and expansion methods under this // heading. -// under returns the expanded underlying type of n0; possibly by following -// forward chains of named types. If an underlying type is found, resolve -// the chain by setting the underlying type for each defined type in the -// chain before returning it. If no underlying type is found or a cycle -// is detected, the result is Typ[Invalid]. If a cycle is detected and -// n0.check != nil, the cycle is reported. +// resolveUnderlying computes the underlying type of n. If n already has an +// underlying type, nothing happens. // -// This is necessary because the underlying type of named may be itself a -// named type that is incomplete: +// It does so by following RHS type chains for alias and named types. If any +// other type T is found, each named type in the chain has its underlying +// type set to T. Aliases are skipped because their underlying type is +// not memoized. // -// type ( -// A B -// B *C -// C A -// ) -// -// The type of C is the (named) type of A which is incomplete, -// and which has as its underlying type the named type B. -func (n0 *Named) under() Type { - u := n0.Underlying() +// resolveUnderlying assumes that there are no direct cycles; if there were +// any, they were broken (by setting the respective types to invalid) during +// the directCycles check phase. +func (n *Named) resolveUnderlying() { + assert(n.stateHas(unpacked)) - // If the underlying type of a defined type is not a defined - // (incl. instance) type, then that is the desired underlying - // type. - var n1 *Named - switch u1 := u.(type) { - case nil: - // After expansion via Underlying(), we should never encounter a nil - // underlying. - panic("nil underlying") - default: - // common case - return u - case *Named: - // handled below - n1 = u1 + var seen map[*Named]bool // for debugging only + if debug { + seen = make(map[*Named]bool) } - if n0.check == nil { - panic("Named.check == nil but type is incomplete") - } - - // Invariant: after this point n0 as well as any named types in its - // underlying chain should be set up when this function exits. 
-	check := n0.check
-	n := n0
-
-	seen := make(map[*Named]int) // types that need their underlying type resolved
-	var path []Object            // objects encountered, for cycle reporting
-
-loop:
-	for {
-		seen[n] = len(seen)
-		path = append(path, n.obj)
-		n = n1
-		if i, ok := seen[n]; ok {
-			// cycle
-			check.cycleError(path[i:], firstInSrc(path[i:]))
-			u = Typ[Invalid]
-			break
-		}
-		u = n.Underlying()
-		switch u1 := u.(type) {
+	var seen map[*Named]bool // for debugging only
+	if debug {
+		seen = make(map[*Named]bool)
+	}
+
+	var path []*Named
+	var u Type
+	for rhs := Type(n); u == nil; {
+		switch t := rhs.(type) {
 		case nil:
 			u = Typ[Invalid]
-			break loop
-		default:
-			break loop
+
+		case *Alias:
+			rhs = unalias(t)
+
 		case *Named:
-			// Continue collecting *Named types in the chain.
-			n1 = u1
+			if debug {
+				assert(!seen[t])
+				seen[t] = true
+			}
+
+			// don't recalculate the underlying
+			if t.stateHas(hasUnder) {
+				u = t.underlying
+				break
+			}
+
+			path = append(path, t)
+
+			t.unpack()
+			assert(t.rhs() != nil || t.allowNilRHS)
+			rhs = t.rhs()
+
+		default:
+			u = rhs // any type literal or predeclared type works
 		}
 	}
 
-	for n := range seen {
-		// We should never have to update the underlying type of an imported type;
-		// those underlying types should have been resolved during the import.
-		// Also, doing so would lead to a race condition (was go.dev/issue/31749).
-		// Do this check always, not just in debug mode (it's cheap).
-		if n.obj.pkg != check.pkg {
-			panic("imported type with unresolved underlying type")
-		}
-		n.underlying = u
+	for _, t := range path {
+		func() {
+			t.mu.Lock()
+			defer t.mu.Unlock()
+			// Careful, t.underlying has lock-free readers. Since we might be racing
+			// another call to resolveUnderlying, we have to avoid overwriting
+			// t.underlying. Otherwise, the race detector will be tripped.
+			if !t.stateHas(hasUnder) {
+				t.underlying = u
+				t.setState(hasUnder)
+			}
+		}()
 	}
-
-	return u
 }
 
 func (n *Named) lookupMethod(pkg *Package, name string, foldCase bool) (int, *Func) {
-	n.resolve()
+	n.unpack()
 	if samePkg(n.obj.pkg, pkg) || isExported(name) || foldCase {
 		// If n is an instance, we may not have yet instantiated all of its methods.
 		// Look up the method index in orig, and only instantiate method at the
@@ -649,78 +700,106 @@ func (check *Checker) context() *Context {
 	return check.ctxt
 }
 
-// expandUnderlying substitutes type arguments in the underlying type n.orig,
-// returning the result. Returns Typ[Invalid] if there was an error.
-func (n *Named) expandUnderlying() Type {
+// expandRHS crafts a synthetic RHS for an instantiated type using the RHS of
+// its origin type (which must be a generic type).
+//
+// Suppose that we had:
+//
+//	type T[P any] struct {
+//		f P
+//	}
+//
+//	type U T[int]
+//
+// When we go to U, we observe T[int]. Since T[int] is an instantiation, it has no
+// declaration. Here, we craft a synthetic RHS for T[int] as if it were declared,
+// somewhat similar to:
+//
+//	type T[int] struct {
+//		f int
+//	}
+//
+// And note that the synthetic RHS here is the same as the underlying for U. Now,
+// consider:
+//
+//	type T[_ any] U
+//	type U int
+//	type V T[U]
+//
+// The synthetic RHS for T[U] becomes:
+//
+//	type T[U] U
+//
+// Whereas the underlying of V is int, not U.
+func (n *Named) expandRHS() (rhs Type) { check := n.check if check != nil && check.conf._Trace { - check.trace(n.obj.pos, "-- Named.expandUnderlying %s", n) + check.trace(n.obj.pos, "-- Named.expandRHS %s", n) check.indent++ defer func() { check.indent-- - check.trace(n.obj.pos, "=> %s (tparams = %s, under = %s)", n, n.tparams.list(), n.underlying) + check.trace(n.obj.pos, "=> %s (rhs = %s)", n, rhs) }() } - assert(n.inst.orig.underlying != nil) + assert(!n.stateHas(unpacked)) + assert(n.inst.orig.stateHas(lazyLoaded | unpacked)) + if n.inst.ctxt == nil { n.inst.ctxt = NewContext() } + ctxt := n.inst.ctxt orig := n.inst.orig + targs := n.inst.targs + tpars := orig.tparams - if asNamed(orig.underlying) != nil { - // We should only get a Named underlying type here during type checking - // (for example, in recursive type declarations). - assert(check != nil) - } - - if orig.tparams.Len() != targs.Len() { - // Mismatching arg and tparam length may be checked elsewhere. + if targs.Len() != tpars.Len() { return Typ[Invalid] } - // Ensure that an instance is recorded before substituting, so that we - // resolve n for any recursive references. - h := n.inst.ctxt.instanceHash(orig, targs.list()) - n2 := n.inst.ctxt.update(h, orig, n.TypeArgs().list(), n) - assert(n == n2) + h := ctxt.instanceHash(orig, targs.list()) + u := ctxt.update(h, orig, targs.list(), n) // block fixed point infinite instantiation + assert(n == u) - smap := makeSubstMap(orig.tparams.list(), targs.list()) - var ctxt *Context + m := makeSubstMap(tpars.list(), targs.list()) if check != nil { ctxt = check.context() } - underlying := n.check.subst(n.obj.pos, orig.underlying, smap, n, ctxt) - // If the underlying type of n is an interface, we need to set the receiver of - // its methods accurately -- we set the receiver of interface methods on - // the RHS of a type declaration to the defined type. - if iface, _ := underlying.(*Interface); iface != nil { + + rhs = check.subst(n.obj.pos, orig.rhs(), m, n, ctxt) + + // TODO(markfreeman): Can we handle this in substitution? + // If the RHS is an interface, we must set the receiver of interface methods + // to the named type. + if iface, _ := rhs.(*Interface); iface != nil { if methods, copied := replaceRecvType(iface.methods, orig, n); copied { - // If the underlying type doesn't actually use type parameters, it's - // possible that it wasn't substituted. In this case we need to create - // a new *Interface before modifying receivers. - if iface == orig.underlying { - old := iface - iface = check.newInterface() - iface.embeddeds = old.embeddeds - assert(old.complete) // otherwise we are copying incomplete data - iface.complete = old.complete - iface.implicit = old.implicit // should be false but be conservative - underlying = iface + // If the RHS doesn't use type parameters, it may not have been + // substituted; we need to craft a new interface first. + if iface == orig.rhs() { + assert(iface.complete) // otherwise we are copying incomplete data + + crafted := check.newInterface() + crafted.complete = true + crafted.implicit = false + crafted.embeddeds = iface.embeddeds + + iface = crafted } iface.methods = methods iface.tset = nil // recompute type set with new methods - // If check != nil, check.newInterface will have saved the interface for later completion. - if check == nil { // golang/go#61561: all newly created interfaces must be fully evaluated + // go.dev/issue/61561: We have to complete the interface even without a checker. 
+ if check == nil { iface.typeSet() } + + return iface } } - return underlying + return rhs } // safeUnderlying returns the underlying type of typ without expanding diff --git a/src/go/types/object.go b/src/go/types/object.go index 823c03c7fd9..7bf705cb817 100644 --- a/src/go/types/object.go +++ b/src/go/types/object.go @@ -295,10 +295,11 @@ func NewTypeName(pos token.Pos, pkg *Package, name string, typ Type) *TypeName { } // NewTypeNameLazy returns a new defined type like NewTypeName, but it -// lazily calls resolve to finish constructing the Named object. +// lazily calls unpack to finish constructing the Named object. func _NewTypeNameLazy(pos token.Pos, pkg *Package, name string, load func(*Named) ([]*TypeParam, Type, []*Func, []func())) *TypeName { obj := NewTypeName(pos, pkg, name, nil) - NewNamed(obj, nil, nil).loader = load + n := (*Checker)(nil).newNamed(obj, nil, nil) + n.loader = load return obj } @@ -330,7 +331,7 @@ func (obj *TypeName) IsAlias() bool { } } -// A Variable represents a declared variable (including function parameters and results, and struct fields). +// A Var represents a declared variable (including function parameters and results, and struct fields). type Var struct { object origin *Var // if non-nil, the Var from which this one was instantiated @@ -641,7 +642,7 @@ func writeObject(buf *bytes.Buffer, obj Object, qf Qualifier) { } else { // TODO(gri) should this be fromRHS for *Named? // (See discussion in #66559.) - typ = under(typ) + typ = typ.Underlying() } } diff --git a/src/go/types/operand.go b/src/go/types/operand.go index d933c173ff6..9abeaafae0c 100644 --- a/src/go/types/operand.go +++ b/src/go/types/operand.go @@ -198,7 +198,7 @@ func operandString(x *operand, qf Qualifier) string { what := compositeKind(x.typ) if what == "" { // x.typ must be basic type - what = under(x.typ).(*Basic).name + what = x.typ.Underlying().(*Basic).name } desc += what + " " } @@ -233,7 +233,7 @@ func operandString(x *operand, qf Qualifier) string { // ("array", "slice", etc.) or the empty string if typ is not // composite but a basic type. func compositeKind(typ Type) string { - switch under(typ).(type) { + switch typ.Underlying().(type) { case *Basic: return "" case *Array: @@ -323,8 +323,8 @@ func (x *operand) assignableTo(check *Checker, T Type, cause *string) (bool, Cod return true, 0 } - Vu := under(V) - Tu := under(T) + Vu := V.Underlying() + Tu := T.Underlying() Vp, _ := V.(*TypeParam) Tp, _ := T.(*TypeParam) diff --git a/src/go/types/predicates.go b/src/go/types/predicates.go index 45f28726eec..7a48c2c96c2 100644 --- a/src/go/types/predicates.go +++ b/src/go/types/predicates.go @@ -31,11 +31,11 @@ func isString(t Type) bool { return isBasic(t, IsString) } func isIntegerOrFloat(t Type) bool { return isBasic(t, IsInteger|IsFloat) } func isConstType(t Type) bool { return isBasic(t, IsConstType) } -// isBasic reports whether under(t) is a basic type with the specified info. +// isBasic reports whether t.Underlying() is a basic type with the specified info. // If t is a type parameter the result is false; i.e., // isBasic does not look inside a type parameter. 
func isBasic(t Type, info BasicInfo) bool { - u, _ := under(t).(*Basic) + u, _ := t.Underlying().(*Basic) return u != nil && u.info&info != 0 } @@ -51,7 +51,7 @@ func allString(t Type) bool { return allBasic(t, IsString) } func allOrdered(t Type) bool { return allBasic(t, IsOrdered) } func allNumericOrString(t Type) bool { return allBasic(t, IsNumeric|IsString) } -// allBasic reports whether under(t) is a basic type with the specified info. +// allBasic reports whether t.Underlying() is a basic type with the specified info. // If t is a type parameter, the result is true if isBasic(t, info) is true // for all specific types of the type parameter's type set. func allBasic(t Type, info BasicInfo) bool { @@ -88,7 +88,7 @@ func isTypeLit(t Type) bool { // Safe to call from types that are not fully set up. func isTyped(t Type) bool { // Alias and named types cannot denote untyped types - // so there's no need to call Unalias or under, below. + // so there's no need to call Unalias or Underlying, below. b, _ := t.(*Basic) return b == nil || b.info&IsUntyped == 0 } @@ -103,14 +103,14 @@ func isUntyped(t Type) bool { // Safe to call from types that are not fully set up. func isUntypedNumeric(t Type) bool { // Alias and named types cannot denote untyped types - // so there's no need to call Unalias or under, below. + // so there's no need to call Unalias or Underlying, below. b, _ := t.(*Basic) return b != nil && b.info&IsUntyped != 0 && b.info&IsNumeric != 0 } // IsInterface reports whether t is an interface type. func IsInterface(t Type) bool { - _, ok := under(t).(*Interface) + _, ok := t.Underlying().(*Interface) return ok } @@ -166,7 +166,7 @@ func comparableType(T Type, dynamic bool, seen map[Type]bool) *typeError { } seen[T] = true - switch t := under(T).(type) { + switch t := T.Underlying().(type) { case *Basic: // assume invalid types to be comparable to avoid follow-up errors if t.kind == UntypedNil { @@ -209,7 +209,7 @@ func comparableType(T Type, dynamic bool, seen map[Type]bool) *typeError { // hasNil reports whether type t includes the nil value. func hasNil(t Type) bool { - switch u := under(t).(type) { + switch u := t.Underlying().(type) { case *Basic: return u.kind == UnsafePointer case *Slice, *Pointer, *Signature, *Map, *Chan: @@ -522,7 +522,7 @@ func identicalInstance(xorig Type, xargs []Type, yorig Type, yargs []Type) bool // for untyped nil is untyped nil. func Default(t Type) Type { // Alias and named types cannot denote untyped types - // so there's no need to call Unalias or under, below. + // so there's no need to call Unalias or Underlying, below. if t, _ := t.(*Basic); t != nil { switch t.kind { case UntypedBool: diff --git a/src/go/types/range.go b/src/go/types/range.go index 303d001c72d..d38150dea77 100644 --- a/src/go/types/range.go +++ b/src/go/types/range.go @@ -38,7 +38,7 @@ func (check *Checker) rangeStmt(inner stmtContext, rangeStmt *ast.RangeStmt, noN check.expr(nil, &x, rangeVar) if isTypes2 && x.mode != invalid && sValue == nil && !check.hasCallOrRecv { - if t, ok := arrayPtrDeref(under(x.typ)).(*Array); ok { + if t, ok := arrayPtrDeref(x.typ.Underlying()).(*Array); ok { for { // Put constant info on the thing inside parentheses. // That's where (*../noder/writer).expr expects it. 
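Most hunks in this sweep are a mechanical migration from the unexported under(t) helper to the exported t.Underlying(). The forwarding chain from the deleted under() documentation (types A, B, C) can be verified end to end through the public API; the driver below is illustrative only, and shows Underlying resolving every name in the chain to the pointer literal *C:

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
		"go/token"
		"go/types"
	)

	// The forwarding chain from the old under() doc comment: the underlying
	// type of A, B, and C is the pointer literal *C.
	const src = `package p

	type (
		A B
		B *C
		C A
	)
	`

	func main() {
		fset := token.NewFileSet()
		f, err := parser.ParseFile(fset, "p.go", src, 0)
		if err != nil {
			panic(err)
		}
		var conf types.Config
		pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
		if err != nil {
			panic(err)
		}
		for _, name := range []string{"A", "B", "C"} {
			t := pkg.Scope().Lookup(name).Type()
			// Underlying never returns a *Named, *Alias, or *TypeParam.
			fmt.Printf("%s -> %s\n", name, t.Underlying())
		}
	}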
diff --git a/src/go/types/resolver.go b/src/go/types/resolver.go
index dcf863b029f..a8d11c2aa52 100644
--- a/src/go/types/resolver.go
+++ b/src/go/types/resolver.go
@@ -499,6 +499,19 @@ func (check *Checker) collectObjects() {
 	}
 }
 
+// sortObjects populates check.objList with the package-level objects in
+// source order, for reproducible results.
+func (check *Checker) sortObjects() {
+	check.objList = make([]Object, len(check.objMap))
+	i := 0
+	for obj := range check.objMap {
+		check.objList[i] = obj
+		i++
+	}
+	slices.SortFunc(check.objList, func(a, b Object) int {
+		return cmp.Compare(a.order(), b.order())
+	})
+}
+
 // unpackRecv unpacks a receiver type expression and returns its components: ptr indicates
 // whether rtyp is a pointer receiver, base is the receiver base type expression stripped
 // of its type parameters (if any), and tparams are its type parameter names, if any. The
@@ -621,19 +634,8 @@ func (check *Checker) resolveBaseTypeName(ptr bool, name *ast.Ident) (ptr_ bool,
 
 // packageObjects typechecks all package objects, but not function bodies.
 func (check *Checker) packageObjects() {
-	// process package objects in source order for reproducible results
-	objList := make([]Object, len(check.objMap))
-	i := 0
-	for obj := range check.objMap {
-		objList[i] = obj
-		i++
-	}
-	slices.SortFunc(objList, func(a, b Object) int {
-		return cmp.Compare(a.order(), b.order())
-	})
-
 	// add new methods to already type-checked types (from a prior Checker.Files call)
-	for _, obj := range objList {
+	for _, obj := range check.objList {
 		if obj, _ := obj.(*TypeName); obj != nil && obj.typ != nil {
 			check.collectMethods(obj)
 		}
@@ -656,7 +658,7 @@ func (check *Checker) packageObjects() {
 		// its Type is Invalid.
 		//
 		// Investigate and reenable this branch.
-		for _, obj := range objList {
+		for _, obj := range check.objList {
 			check.objDecl(obj, nil)
 		}
 	} else {
@@ -668,7 +670,7 @@ func (check *Checker) packageObjects() {
 	var aliasList []*TypeName
 	var othersList []Object // everything that's not a type
 	// phase 1: non-alias type declarations
-	for _, obj := range objList {
+	for _, obj := range check.objList {
 		if tname, _ := obj.(*TypeName); tname != nil {
 			if check.objMap[tname].tdecl.Assign.IsValid() {
 				aliasList = append(aliasList, tname)
diff --git a/src/go/types/signature.go b/src/go/types/signature.go
index fa41c797b29..2f8be54e171 100644
--- a/src/go/types/signature.go
+++ b/src/go/types/signature.go
@@ -226,7 +226,7 @@ func (check *Checker) collectRecv(rparam *ast.Field, scopePos token.Pos) (*Var,
 		case *Alias:
 			// Methods on generic aliases are not permitted.
 			// Only report an error if the alias type is valid.
- if isValid(unalias(t)) { + if isValid(t) { check.errorf(rbase, InvalidRecv, "cannot define new methods on generic alias type %s", t) } // Ok to continue but do not set basetype in this case so that @@ -461,7 +461,7 @@ func (check *Checker) validRecv(pos positioner, recv *Var) { break } var cause string - switch u := T.under().(type) { + switch u := T.Underlying().(type) { case *Basic: // unsafe.Pointer is treated like a regular pointer if u.kind == UnsafePointer { diff --git a/src/go/types/sizeof_test.go b/src/go/types/sizeof_test.go index fa07eb10f19..4ff255ffa02 100644 --- a/src/go/types/sizeof_test.go +++ b/src/go/types/sizeof_test.go @@ -30,7 +30,7 @@ func TestSizeof(t *testing.T) { {Interface{}, 40, 80}, {Map{}, 16, 32}, {Chan{}, 12, 24}, - {Named{}, 60, 112}, + {Named{}, 64, 120}, {TypeParam{}, 28, 48}, {term{}, 12, 24}, diff --git a/src/go/types/sizes.go b/src/go/types/sizes.go index 51ea224f0b2..e2ff047bc85 100644 --- a/src/go/types/sizes.go +++ b/src/go/types/sizes.go @@ -57,7 +57,7 @@ func (s *StdSizes) Alignof(T Type) (result int64) { // For arrays and structs, alignment is defined in terms // of alignment of the elements and fields, respectively. - switch t := under(T).(type) { + switch t := T.Underlying().(type) { case *Array: // spec: "For a variable x of array type: unsafe.Alignof(x) // is the same as unsafe.Alignof(x[0]), but at least 1." @@ -165,7 +165,7 @@ var basicSizes = [...]byte{ } func (s *StdSizes) Sizeof(T Type) int64 { - switch t := under(T).(type) { + switch t := T.Underlying().(type) { case *Basic: assert(isTyped(T)) k := t.kind @@ -310,7 +310,7 @@ func (conf *Config) offsetsof(T *Struct) []int64 { func (conf *Config) offsetof(T Type, index []int) int64 { var offs int64 for _, i := range index { - s := under(T).(*Struct) + s := T.Underlying().(*Struct) d := conf.offsetsof(s)[i] if d < 0 { return -1 diff --git a/src/go/types/sizes_test.go b/src/go/types/sizes_test.go index 157faf87d4c..e0ca14e11b2 100644 --- a/src/go/types/sizes_test.go +++ b/src/go/types/sizes_test.go @@ -187,7 +187,6 @@ func main() { func TestGCSizes(t *testing.T) { types.DefPredeclaredTestFuncs() for _, tc := range gcSizesTests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() conf := types.Config{ diff --git a/src/go/types/stdlib_test.go b/src/go/types/stdlib_test.go index 8b4490084ef..da2b38403fe 100644 --- a/src/go/types/stdlib_test.go +++ b/src/go/types/stdlib_test.go @@ -70,8 +70,6 @@ func TestStdlib(t *testing.T) { var wg sync.WaitGroup for dir := range dirFiles { - dir := dir - cpulimit <- struct{}{} wg.Add(1) go func() { diff --git a/src/go/types/struct.go b/src/go/types/struct.go index a6970832c7c..0133eb450db 100644 --- a/src/go/types/struct.go +++ b/src/go/types/struct.go @@ -137,7 +137,7 @@ func (check *Checker) structType(styp *Struct, e *ast.StructType) { // Because we have a name, typ must be of the form T or *T, where T is the name // of a (named or alias) type, and t (= deref(typ)) must be the type of T. // We must delay this check to the end because we don't want to instantiate - // (via under(t)) a possibly incomplete type. + // (via t.Underlying()) a possibly incomplete type. 
// for use in the closure below embeddedTyp := typ @@ -145,7 +145,7 @@ func (check *Checker) structType(styp *Struct, e *ast.StructType) { check.later(func() { t, isPtr := deref(embeddedTyp) - switch u := under(t).(type) { + switch u := t.Underlying().(type) { case *Basic: if !isValid(t) { // error was reported before diff --git a/src/go/types/typeparam.go b/src/go/types/typeparam.go index 2ffef8f6133..eafcb1c145d 100644 --- a/src/go/types/typeparam.go +++ b/src/go/types/typeparam.go @@ -116,7 +116,7 @@ func (t *TypeParam) iface() *Interface { // determine constraint interface var ityp *Interface - switch u := under(bound).(type) { + switch u := bound.Underlying().(type) { case *Basic: if !isValid(u) { // error is reported elsewhere diff --git a/src/go/types/typeset.go b/src/go/types/typeset.go index 46ed5ce1802..3a61c307561 100644 --- a/src/go/types/typeset.go +++ b/src/go/types/typeset.go @@ -117,13 +117,13 @@ func (s *_TypeSet) all(f func(t, u Type) bool) bool { for _, t := range s.terms { assert(t.typ != nil) - // Unalias(x) == under(x) for ~x terms + // Unalias(x) == x.Underlying() for ~x terms u := Unalias(t.typ) if !t.tilde { - u = under(u) + u = u.Underlying() } if debug { - assert(Identical(u, under(u))) + assert(Identical(u, u.Underlying())) } if !f(t.typ, u) { return false @@ -267,7 +267,7 @@ func computeInterfaceTypeSet(check *Checker, pos token.Pos, ityp *Interface) *_T } var comparable bool var terms termlist - switch u := under(typ).(type) { + switch u := typ.Underlying().(type) { case *Interface: // For now we don't permit type parameters as constraints. assert(!isTypeParam(typ)) @@ -383,7 +383,7 @@ func computeUnionTypeSet(check *Checker, unionSets map[*Union]*_TypeSet, pos tok var allTerms termlist for _, t := range utyp.terms { var terms termlist - u := under(t.typ) + u := t.typ.Underlying() if ui, _ := u.(*Interface); ui != nil { // For now we don't permit type parameters as constraints. assert(!isTypeParam(t.typ)) diff --git a/src/go/types/typeset_test.go b/src/go/types/typeset_test.go index 51560924839..a4feb7469b0 100644 --- a/src/go/types/typeset_test.go +++ b/src/go/types/typeset_test.go @@ -65,7 +65,7 @@ func TestTypeSetString(t *testing.T) { if obj == nil { t.Fatalf("%s: T not found (invalid test case)", body) } - T, ok := under(obj.Type()).(*Interface) + T, ok := obj.Type().Underlying().(*Interface) if !ok { t.Fatalf("%s: %v is not an interface (invalid test case)", body, obj) } diff --git a/src/go/types/typestring.go b/src/go/types/typestring.go index 804e80407ef..bd13459832d 100644 --- a/src/go/types/typestring.go +++ b/src/go/types/typestring.go @@ -458,7 +458,7 @@ func (w *typeWriter) tuple(tup *Tuple, variadic bool) { } else { // special case: // append(s, "foo"...) leads to signature func([]byte, string...) 
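// (A concrete instance of the special case, for reference: the spec allows a
// string to be appended to a []byte,
//
//	b := []byte("abc")
//	b = append(b, "def"...)
//
// so a variadic []byte parameter may be bound to a string argument, which is
// why the func([]byte, string...) shape is printed here.)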
- if t, _ := under(typ).(*Basic); t == nil || t.kind != String { + if t, _ := typ.Underlying().(*Basic); t == nil || t.kind != String { w.error("expected string type") continue } diff --git a/src/go/types/typeterm.go b/src/go/types/typeterm.go index 1cd4e1651f3..b9ba8f38f91 100644 --- a/src/go/types/typeterm.go +++ b/src/go/types/typeterm.go @@ -118,7 +118,7 @@ func (x *term) includes(t Type) bool { u := t if x.tilde { - u = under(u) + u = u.Underlying() } return Identical(x.typ, u) } @@ -158,11 +158,11 @@ func (x *term) disjoint(y *term) bool { } ux := x.typ if y.tilde { - ux = under(ux) + ux = ux.Underlying() } uy := y.typ if x.tilde { - uy = under(uy) + uy = uy.Underlying() } return !Identical(ux, uy) } diff --git a/src/go/types/typexpr.go b/src/go/types/typexpr.go index c040ee2a29c..88ec4b77fca 100644 --- a/src/go/types/typexpr.go +++ b/src/go/types/typexpr.go @@ -168,11 +168,11 @@ func (check *Checker) validVarType(e ast.Expr, typ Type) { return } - // We don't want to call under() or complete interfaces while we are in + // We don't want to call typ.Underlying() or complete interfaces while we are in // the middle of type-checking parameter declarations that might belong // to interface methods. Delay this check to the end of type-checking. check.later(func() { - if t, _ := under(typ).(*Interface); t != nil { + if t, _ := typ.Underlying().(*Interface); t != nil { tset := computeInterfaceTypeSet(check, e.Pos(), t) // TODO(gri) is this the correct position? if !tset.IsMethodSet() { if tset.comparable { @@ -237,7 +237,7 @@ func (check *Checker) typInternal(e0 ast.Expr, def *TypeName) (T Type) { check.indent-- var under Type if T != nil { - // Calling under() here may lead to endless instantiations. + // Calling T.Underlying() here may lead to endless instantiations. // Test case: type T[P any] *T[P] under = safeUnderlying(T) } @@ -421,7 +421,7 @@ func setDefType(def *TypeName, typ Type) { case *Basic: assert(t == Typ[Invalid]) case *Named: - t.underlying = typ + t.fromRHS = typ default: panic(fmt.Sprintf("unexpected type %T", t)) } diff --git a/src/go/types/under.go b/src/go/types/under.go index 43bf0ad07cd..6056b2e4829 100644 --- a/src/go/types/under.go +++ b/src/go/types/under.go @@ -9,19 +9,8 @@ package types import "iter" -// under returns the true expanded underlying type. -// If it doesn't exist, the result is Typ[Invalid]. -// under must only be called when a type is known -// to be fully set up. -func under(t Type) Type { - if t := asNamed(t); t != nil { - return t.under() - } - return t.Underlying() -} - // If typ is a type parameter, underIs returns the result of typ.underIs(f). -// Otherwise, underIs returns the result of f(under(typ)). +// Otherwise, underIs returns the result of f(typ.Underlying()). func underIs(typ Type, f func(Type) bool) bool { return all(typ, func(_, u Type) bool { return f(u) @@ -34,7 +23,7 @@ func all(t Type, f func(t, u Type) bool) bool { if p, _ := Unalias(t).(*TypeParam); p != nil { return p.typeset(f) } - return f(t, under(t)) + return f(t, t.Underlying()) } // typeset is an iterator over the (type/underlying type) pairs of the diff --git a/src/go/types/unify.go b/src/go/types/unify.go index abcbab433a1..bf072c09d9f 100644 --- a/src/go/types/unify.go +++ b/src/go/types/unify.go @@ -144,7 +144,7 @@ func (u *unifier) unify(x, y Type, mode unifyMode) bool { return u.nify(x, y, mode, nil) } -func (u *unifier) tracef(format string, args ...interface{}) { +func (u *unifier) tracef(format string, args ...any) { fmt.Println(strings.Repeat(". 
", u.depth) + sprintf(nil, nil, true, format, args...)) } @@ -273,7 +273,7 @@ func (u *unifier) inferred(tparams []*TypeParam) []Type { // it is a non-type parameter interface. Otherwise it returns nil. func asInterface(x Type) (i *Interface) { if _, ok := Unalias(x).(*TypeParam); !ok { - i, _ = under(x).(*Interface) + i, _ = x.Underlying().(*Interface) } return i } @@ -342,7 +342,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) { if traceInference { u.tracef("%s ≡ under %s", x, ny) } - y = ny.under() + y = ny.Underlying() // Per the spec, a defined type cannot have an underlying type // that is a type parameter. assert(!isTypeParam(y)) @@ -433,7 +433,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) { u.set(px, y) default: // Neither x nor y are defined types. - if yc, _ := under(y).(*Chan); yc != nil && yc.dir != SendRecv { + if yc, _ := y.Underlying().(*Chan); yc != nil && yc.dir != SendRecv { // y is a directed channel type: select y. u.set(px, y) } @@ -782,7 +782,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) { } // If y is a defined type, it may not match against cx which // is an underlying type (incl. int, string, etc.). Use assign - // mode here so that the unifier automatically takes under(y) + // mode here so that the unifier automatically uses y.Underlying() // if necessary. return u.nify(cx, yorig, assign, p) } diff --git a/src/go/types/union.go b/src/go/types/union.go index 30365e30bc2..3f7efdf1f38 100644 --- a/src/go/types/union.go +++ b/src/go/types/union.go @@ -94,7 +94,7 @@ func parseUnion(check *Checker, uexpr ast.Expr) Type { continue } - u := under(t.typ) + u := t.typ.Underlying() f, _ := u.(*Interface) if t.tilde { if f != nil { diff --git a/src/go/types/universe.go b/src/go/types/universe.go index 4753111c118..70935dc35ff 100644 --- a/src/go/types/universe.go +++ b/src/go/types/universe.go @@ -119,7 +119,7 @@ func defPredeclaredTypes() { { obj := NewTypeName(nopos, nil, "error", nil) obj.setColor(black) - typ := NewNamed(obj, nil, nil) + typ := (*Checker)(nil).newNamed(obj, nil, nil) // error.Error() string recv := newVar(RecvVar, nopos, nil, "", typ) @@ -131,7 +131,8 @@ func defPredeclaredTypes() { ityp := &Interface{methods: []*Func{err}, complete: true} computeInterfaceTypeSet(nil, nopos, ityp) // prevent races due to lazy computation of tset - typ.SetUnderlying(ityp) + typ.fromRHS = ityp + typ.Underlying() def(obj) } @@ -139,12 +140,13 @@ func defPredeclaredTypes() { { obj := NewTypeName(nopos, nil, "comparable", nil) obj.setColor(black) - typ := NewNamed(obj, nil, nil) + typ := (*Checker)(nil).newNamed(obj, nil, nil) // interface{} // marked as comparable ityp := &Interface{complete: true, tset: &_TypeSet{nil, allTermlist, true}} - typ.SetUnderlying(ityp) + typ.fromRHS = ityp + typ.Underlying() def(obj) } } diff --git a/src/go/types/validtype.go b/src/go/types/validtype.go index f87b82c439e..c23316da82f 100644 --- a/src/go/types/validtype.go +++ b/src/go/types/validtype.go @@ -94,13 +94,6 @@ func (check *Checker) validType0(pos token.Pos, typ Type, nest, path []*Named) b // break // } - // Don't report a 2nd error if we already know the type is invalid - // (e.g., if a cycle was detected earlier, via under). - // Note: ensure that t.orig is fully resolved by calling Underlying(). - if !isValid(t.Underlying()) { - return false - } - // If the current type t is also found in nest, (the memory of) t is // embedded in itself, indicating an invalid recursive type. 
for _, e := range nest { @@ -128,8 +121,9 @@ func (check *Checker) validType0(pos token.Pos, typ Type, nest, path []*Named) b // are not yet available to other goroutines). assert(t.obj.pkg == check.pkg) assert(t.Origin().obj.pkg == check.pkg) - t.underlying = Typ[Invalid] - t.Origin().underlying = Typ[Invalid] + + // let t become invalid when it is unpacked + t.Origin().fromRHS = Typ[Invalid] // Find the starting point of the cycle and report it. // Because each type in nest must also appear in path (see invariant below), @@ -150,7 +144,8 @@ func (check *Checker) validType0(pos token.Pos, typ Type, nest, path []*Named) b // Every type added to nest is also added to path; thus every type that is in nest // must also be in path (invariant). But not every type in path is in nest, since // nest may be pruned (see below, *TypeParam case). - if !check.validType0(pos, t.Origin().fromRHS, append(nest, t), append(path, t)) { + t.Origin().unpack() + if !check.validType0(pos, t.Origin().rhs(), append(nest, t), append(path, t)) { return false } diff --git a/src/go/types/version.go b/src/go/types/version.go index 81331103980..5ba12c406a7 100644 --- a/src/go/types/version.go +++ b/src/go/types/version.go @@ -58,7 +58,7 @@ func (check *Checker) allowVersion(want goVersion) bool { // verifyVersionf is like allowVersion but also accepts a format string and arguments // which are used to report a version error if allowVersion returns false. -func (check *Checker) verifyVersionf(at positioner, v goVersion, format string, args ...interface{}) bool { +func (check *Checker) verifyVersionf(at positioner, v goVersion, format string, args ...any) bool { if !check.allowVersion(v) { check.versionErrorf(at, v, format, args...) return false diff --git a/src/hash/maphash/maphash_runtime.go b/src/hash/maphash/maphash_runtime.go index 36ac638071d..5ae23a02186 100644 --- a/src/hash/maphash/maphash_runtime.go +++ b/src/hash/maphash/maphash_runtime.go @@ -8,7 +8,7 @@ package maphash import ( "internal/abi" - "internal/goarch" + "internal/runtime/maps" "unsafe" ) @@ -29,10 +29,10 @@ func rthash(buf []byte, seed uint64) uint64 { // The runtime hasher only works on uintptr. For 64-bit // architectures, we use the hasher directly. Otherwise, // we use two parallel hashers on the lower and upper 32 bits. 
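// A rough scalar sketch of the split-hash scheme described above, assuming a
// hypothetical hash32 primitive in place of runtime_memhash on a 32-bit
// target (names are illustrative, not from this change):
//
//	func hash64via32(p unsafe.Pointer, seed uint64, n uintptr) uint64 {
//		lo := hash32(p, uintptr(uint32(seed)), n) // seed with the low 32 bits
//		hi := hash32(p, uintptr(seed>>32), n)     // seed with the high 32 bits
//		return uint64(hi)<<32 | uint64(lo)
//	}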
- if goarch.PtrSize == 8 { + if maps.Use64BitHash { return uint64(runtime_memhash(unsafe.Pointer(&buf[0]), uintptr(seed), uintptr(len))) } - lo := runtime_memhash(unsafe.Pointer(&buf[0]), uintptr(seed), uintptr(len)) + lo := runtime_memhash(unsafe.Pointer(&buf[0]), uintptr(uint32(seed)), uintptr(len)) hi := runtime_memhash(unsafe.Pointer(&buf[0]), uintptr(seed>>32), uintptr(len)) return uint64(hi)<<32 | uint64(lo) } @@ -51,10 +51,10 @@ func comparableHash[T comparable](v T, seed Seed) uint64 { var m map[T]struct{} mTyp := abi.TypeOf(m) hasher := (*abi.MapType)(unsafe.Pointer(mTyp)).Hasher - if goarch.PtrSize == 8 { + if maps.Use64BitHash { return uint64(hasher(abi.NoEscape(unsafe.Pointer(&v)), uintptr(s))) } - lo := hasher(abi.NoEscape(unsafe.Pointer(&v)), uintptr(s)) + lo := hasher(abi.NoEscape(unsafe.Pointer(&v)), uintptr(uint32(s))) hi := hasher(abi.NoEscape(unsafe.Pointer(&v)), uintptr(s>>32)) return uint64(hi)<<32 | uint64(lo) } diff --git a/src/hash/maphash/smhasher_test.go b/src/hash/maphash/smhasher_test.go index b9621b4c4f3..86ff7c2cdb9 100644 --- a/src/hash/maphash/smhasher_test.go +++ b/src/hash/maphash/smhasher_test.go @@ -8,6 +8,7 @@ package maphash import ( "fmt" + "internal/runtime/maps" "internal/testenv" "math" "math/rand" @@ -15,7 +16,6 @@ import ( "slices" "strings" "testing" - "unsafe" ) // Smhasher is a torture test for hash functions. @@ -486,7 +486,7 @@ func text(t *testing.T, h *hashSet, prefix, suffix string) { // Make sure different seed values generate different hashes. func TestSmhasherSeed(t *testing.T) { - if unsafe.Sizeof(uintptr(0)) == 4 { + if !maps.Use64BitHash { t.Skip("32-bit platforms don't have ideal seed-input distributions (see issue 33988)") } t.Parallel() diff --git a/src/internal/abi/funcpc.go b/src/internal/abi/funcpc.go index e038d365848..54b0735f6b1 100644 --- a/src/internal/abi/funcpc.go +++ b/src/internal/abi/funcpc.go @@ -19,7 +19,7 @@ package abi // compile-time error. // // Implemented as a compile intrinsic. -func FuncPCABI0(f interface{}) uintptr +func FuncPCABI0(f any) uintptr // FuncPCABIInternal returns the entry PC of the function f. If f is a // direct reference of a function, it must be defined as ABIInternal. @@ -28,4 +28,4 @@ func FuncPCABI0(f interface{}) uintptr // the behavior is undefined. // // Implemented as a compile intrinsic. -func FuncPCABIInternal(f interface{}) uintptr +func FuncPCABIInternal(f any) uintptr diff --git a/src/internal/abi/type.go b/src/internal/abi/type.go index 1920a8a37fb..7f44a9de568 100644 --- a/src/internal/abi/type.go +++ b/src/internal/abi/type.go @@ -655,7 +655,7 @@ func writeVarint(buf []byte, n int) int { } } -// Name returns the tag string for n, or empty if there is none. +// Name returns the name of n, or empty if it does not actually have a name. 
func (n Name) Name() string { if n.Bytes == nil { return "" diff --git a/src/internal/buildcfg/exp.go b/src/internal/buildcfg/exp.go index b05150373b2..9dcac008813 100644 --- a/src/internal/buildcfg/exp.go +++ b/src/internal/buildcfg/exp.go @@ -79,14 +79,15 @@ func ParseGOEXPERIMENT(goos, goarch, goexp string) (*ExperimentFlags, error) { dwarf5Supported := (goos != "darwin" && goos != "ios" && goos != "aix") baseline := goexperiment.Flags{ - RegabiWrappers: regabiSupported, - RegabiArgs: regabiSupported, - SIMD: goarch == "amd64", // TODO remove this (default to false) when dev.simd is merged - Dwarf5: dwarf5Supported, - RandomizedHeapBase64: true, + RegabiWrappers: regabiSupported, + RegabiArgs: regabiSupported, + SIMD: goarch == "amd64", // TODO remove this (default to false) when dev.simd is merged + Dwarf5: dwarf5Supported, + RandomizedHeapBase64: true, + RuntimeFree: true, + SizeSpecializedMalloc: true, + GreenTeaGC: true, } - - // Start with the statically enabled set of experiments. flags := &ExperimentFlags{ Flags: baseline, baseline: baseline, diff --git a/src/internal/chacha8rand/chacha8.go b/src/internal/chacha8rand/chacha8.go index 14a3c04d018..720fc5701a8 100644 --- a/src/internal/chacha8rand/chacha8.go +++ b/src/internal/chacha8rand/chacha8.go @@ -16,6 +16,7 @@ import ( // Offsets into internal/cpu records for use in assembly. const ( offsetLOONG64HasLSX = unsafe.Offsetof(cpu.Loong64.HasLSX) + offsetRISCV64HasV = unsafe.Offsetof(cpu.RISCV64.HasV) ) const ( diff --git a/src/internal/chacha8rand/chacha8_loong64.s b/src/internal/chacha8rand/chacha8_loong64.s index 73a1e5bf05f..873269c182f 100644 --- a/src/internal/chacha8rand/chacha8_loong64.s +++ b/src/internal/chacha8rand/chacha8_loong64.s @@ -77,14 +77,14 @@ lsx_chacha8: VXORV V15, V15, V15 // save seed state for adding back later - VORV V4, V13, V20 - VORV V5, V13, V21 - VORV V6, V13, V22 - VORV V7, V13, V23 - VORV V8, V13, V24 - VORV V9, V13, V25 - VORV V10, V13, V26 - VORV V11, V13, V27 + VMOVQ V4, V20 + VMOVQ V5, V21 + VMOVQ V6, V22 + VMOVQ V7, V23 + VMOVQ V8, V24 + VMOVQ V9, V25 + VMOVQ V10, V26 + VMOVQ V11, V27 // 4 iterations. Each iteration is 8 quarter-rounds. MOVV $4, R7 diff --git a/src/internal/chacha8rand/chacha8_riscv64.s b/src/internal/chacha8rand/chacha8_riscv64.s new file mode 100644 index 00000000000..5514bacc6cb --- /dev/null +++ b/src/internal/chacha8rand/chacha8_riscv64.s @@ -0,0 +1,113 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "asm_riscv64.h" +#include "go_asm.h" +#include "textflag.h" + +// TODO(mzh): use Zvkb if possible + +#define QR(A, B, C, D) \ + VADDVV A, B, A \ + VXORVV D, A, D \ + VSLLVI $16, D, V28 \ + VSRLVI $16, D, D \ + VXORVV V28, D, D \ + VADDVV D, C, C \ + VXORVV C, B, B \ + VSLLVI $12, B, V29 \ + VSRLVI $20, B, B \ + VXORVV V29, B, B \ + VADDVV B, A, A \ + VXORVV A, D, D \ + VSLLVI $8, D, V30 \ + VSRLVI $24, D, D \ + VXORVV V30, D, D \ + VADDVV D, C, C \ + VXORVV C, B, B \ + VSLLVI $7, B, V31 \ + VSRLVI $25, B, B \ + VXORVV V31, B, B + +// block runs four ChaCha8 block transformations using four elements in each V register. 
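// The QR macro above is the standard ChaCha quarter-round (rotations 16, 12,
// 8, 7). As a reference sketch, not used by this file, the same update in Go
// is roughly:
//
//	func qr(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
//		a += b; d ^= a; d = d<<16 | d>>16
//		c += d; b ^= c; b = b<<12 | b>>20
//		a += b; d ^= a; d = d<<8 | d>>24
//		c += d; b ^= c; b = b<<7 | b>>25
//		return a, b, c, d
//	}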
+// func block(seed *[8]uint32, blocks *[16][4]uint32, counter uint32) +TEXT ·block(SB), NOSPLIT, $0 + // seed in X10 + // blocks in X11 + // counter in X12 + +#ifndef hasV + MOVB internal∕cpu·RISCV64+const_offsetRISCV64HasV(SB), X13 + BNEZ X13, vector_chacha8 + JMP ·block_generic(SB) +#endif + +vector_chacha8: + // At least VLEN >= 128 + VSETIVLI $4, E32, M1, TA, MA, X0 + // Load initial constants into top row. + MOV $·chachaConst(SB), X14 + VLSSEG4E32V (X14), X0, V0 // V0, V1, V2, V3 = const row + VLSSEG8E32V (X10), X0, V4 // V4 ... V11, seed + VIDV V12 + VADDVX X12, V12, V12 // counter + + // Clear all nonces. + VXORVV V13, V13, V13 + VXORVV V14, V14, V14 + VXORVV V15, V15, V15 + + // Copy initial state. + VMV4RV V4, V20 + VMV4RV V8, V24 + + MOV $4, X15 + PCALIGN $16 +loop: + QR(V0, V4, V8, V12) + QR(V1, V5, V9, V13) + QR(V2, V6, V10, V14) + QR(V3, V7, V11, V15) + + QR(V0, V5, V10, V15) + QR(V1, V6, V11, V12) + QR(V2, V7, V8, V13) + QR(V3, V4, V9, V14) + + SUB $1, X15 + BNEZ X15, loop + + VADDVV V20, V4, V4 + VADDVV V21, V5, V5 + VADDVV V22, V6, V6 + VADDVV V23, V7, V7 + VADDVV V24, V8, V8 + VADDVV V25, V9, V9 + VADDVV V26, V10, V10 + VADDVV V27, V11, V11 + + VSE32V V0, (X11); ADD $16, X11; + VSE32V V1, (X11); ADD $16, X11; + VSE32V V2, (X11); ADD $16, X11; + VSE32V V3, (X11); ADD $16, X11; + VSE32V V4, (X11); ADD $16, X11; + VSE32V V5, (X11); ADD $16, X11; + VSE32V V6, (X11); ADD $16, X11; + VSE32V V7, (X11); ADD $16, X11; + VSE32V V8, (X11); ADD $16, X11; + VSE32V V9, (X11); ADD $16, X11; + VSE32V V10, (X11); ADD $16, X11; + VSE32V V11, (X11); ADD $16, X11; + VSE32V V12, (X11); ADD $16, X11; + VSE32V V13, (X11); ADD $16, X11; + VSE32V V14, (X11); ADD $16, X11; + VSE32V V15, (X11); ADD $16, X11; + + RET + +GLOBL ·chachaConst(SB), NOPTR|RODATA, $32 +DATA ·chachaConst+0x00(SB)/4, $0x61707865 +DATA ·chachaConst+0x04(SB)/4, $0x3320646e +DATA ·chachaConst+0x08(SB)/4, $0x79622d32 +DATA ·chachaConst+0x0c(SB)/4, $0x6b206574 diff --git a/src/internal/chacha8rand/chacha8_stub.s b/src/internal/chacha8rand/chacha8_stub.s index 92858c118f4..64245e28af9 100644 --- a/src/internal/chacha8rand/chacha8_stub.s +++ b/src/internal/chacha8rand/chacha8_stub.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !amd64 && !arm64 && !loong64 +//go:build !amd64 && !arm64 && !loong64 && !riscv64 #include "textflag.h" diff --git a/src/internal/coverage/cmerge/merge.go b/src/internal/coverage/cmerge/merge.go index 1339803d086..9fc548eced5 100644 --- a/src/internal/coverage/cmerge/merge.go +++ b/src/internal/coverage/cmerge/merge.go @@ -57,7 +57,7 @@ func (m *Merger) MergeCounters(dst, src []uint32) (error, bool) { return nil, ovf } -// Saturating add does a saturating addition of 'dst' and 'src', +// SaturatingAdd does a saturating addition of 'dst' and 'src', // returning added value or math.MaxUint32 if there is an overflow. // Overflows are recorded in case the client needs to track them. func (m *Merger) SaturatingAdd(dst, src uint32) uint32 { @@ -68,7 +68,7 @@ func (m *Merger) SaturatingAdd(dst, src uint32) uint32 { return result } -// Saturating add does a saturating addition of 'dst' and 'src', +// SaturatingAdd does a saturating addition of 'dst' and 'src', // returning added value or math.MaxUint32 plus an overflow flag. 
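// For example (illustrative usage, not from this change):
//
//	sum, ovf := SaturatingAdd(math.MaxUint32-1, 5) // sum == math.MaxUint32, ovf == true
//	sum, ovf = SaturatingAdd(2, 3)                 // sum == 5, ovf == false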
func SaturatingAdd(dst, src uint32) (uint32, bool) { d, s := uint64(dst), uint64(src) diff --git a/src/internal/coverage/pkid.go b/src/internal/coverage/pkid.go index 09501e6bd2a..213a1ecfd15 100644 --- a/src/internal/coverage/pkid.go +++ b/src/internal/coverage/pkid.go @@ -63,12 +63,12 @@ var rtPkgs = [...]string{ "internal/runtime/exithook", "internal/runtime/gc", "internal/runtime/math", - "internal/runtime/strconv", "internal/runtime/sys", "internal/runtime/maps", "internal/runtime/syscall/linux", "internal/runtime/syscall/windows", "internal/runtime/cgroup", + "internal/strconv", "internal/stringslite", "runtime", } diff --git a/src/internal/coverage/pods/pods.go b/src/internal/coverage/pods/pods.go index e6180fb241e..15b56f823ae 100644 --- a/src/internal/coverage/pods/pods.go +++ b/src/internal/coverage/pods/pods.go @@ -192,7 +192,7 @@ func collectPodsImpl(files []string, dirIndices []int, warn bool) []Pod { return pods } -func warning(s string, a ...interface{}) { +func warning(s string, a ...any) { fmt.Fprintf(os.Stderr, "warning: ") fmt.Fprintf(os.Stderr, s, a...) fmt.Fprintf(os.Stderr, "\n") diff --git a/src/internal/ftoa/ftoa.go b/src/internal/ftoa/ftoa.go deleted file mode 100644 index 678668c719b..00000000000 --- a/src/internal/ftoa/ftoa.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// A hook to get correct floating point conversion from strconv -// in packages that cannot import strconv. - -package ftoa - -var formatFloatPtr func(f float64, fmt byte, prec, bitSize int) string - -func FormatFloat(f float64, fmt byte, prec, bitSize int) string { - if formatFloatPtr != nil { - return formatFloatPtr(f, fmt, prec, bitSize) - } - return "internal/ftoa.formatFloatPtr called before strconv.init()" -} - -func SetFormatFloat(ff func(f float64, fmt byte, prec, bitSize int) string) { - if formatFloatPtr == nil { - formatFloatPtr = ff - } -} diff --git a/src/internal/fuzz/encoding_test.go b/src/internal/fuzz/encoding_test.go index a46a3474038..5f2af4476b8 100644 --- a/src/internal/fuzz/encoding_test.go +++ b/src/internal/fuzz/encoding_test.go @@ -260,7 +260,6 @@ func BenchmarkMarshalCorpusFile(b *testing.B) { } for sz := 1; sz <= len(buf); sz <<= 1 { - sz := sz b.Run(strconv.Itoa(sz), func(b *testing.B) { for i := 0; i < b.N; i++ { b.SetBytes(int64(sz)) @@ -280,7 +279,6 @@ func BenchmarkUnmarshalCorpusFile(b *testing.B) { } for sz := 1; sz <= len(buf); sz <<= 1 { - sz := sz data := marshalCorpusFile(buf[:sz]) b.Run(strconv.Itoa(sz), func(b *testing.B) { for i := 0; i < b.N; i++ { diff --git a/src/internal/fuzz/minimize_test.go b/src/internal/fuzz/minimize_test.go index e7e23e5a052..79d986374f0 100644 --- a/src/internal/fuzz/minimize_test.go +++ b/src/internal/fuzz/minimize_test.go @@ -132,7 +132,6 @@ func TestMinimizeInput(t *testing.T) { } for _, tc := range cases { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() ws := &workerServer{ diff --git a/src/internal/fuzz/worker_test.go b/src/internal/fuzz/worker_test.go index d0b21da7838..9420248d2c0 100644 --- a/src/internal/fuzz/worker_test.go +++ b/src/internal/fuzz/worker_test.go @@ -182,7 +182,6 @@ func BenchmarkWorkerMinimize(b *testing.B) { bytes := make([]byte, 1024) ctx := context.Background() for sz := 1; sz <= len(bytes); sz <<= 1 { - sz := sz input := []any{bytes[:sz]} encodedVals := marshalCorpusFile(input...) 
mem = <-ws.memMu diff --git a/src/internal/goarch/goarch.go b/src/internal/goarch/goarch.go index 4da56dda9da..efcf298d3b7 100644 --- a/src/internal/goarch/goarch.go +++ b/src/internal/goarch/goarch.go @@ -34,7 +34,7 @@ const ( // It is also the size of the machine's native word size (that is, 4 on 32-bit systems, 8 on 64-bit). const PtrSize = 4 << (^uintptr(0) >> 63) -// PtrSize is bit width of a pointer. +// PtrBits is bit width of a pointer. const PtrBits = PtrSize * 8 // ArchFamily is the architecture family (AMD64, ARM, ...) diff --git a/src/internal/godebugs/godebugs_test.go b/src/internal/godebugs/godebugs_test.go index 168acc134aa..e242f58c553 100644 --- a/src/internal/godebugs/godebugs_test.go +++ b/src/internal/godebugs/godebugs_test.go @@ -93,3 +93,11 @@ func incNonDefaults(t *testing.T) map[string]bool { } return seen } + +func TestRemoved(t *testing.T) { + for _, info := range godebugs.Removed { + if godebugs.Lookup(info.Name) != nil { + t.Fatalf("GODEBUG: %v exists in both Removed and All", info.Name) + } + } +} diff --git a/src/internal/godebugs/table.go b/src/internal/godebugs/table.go index 2d008825459..4939e6ff109 100644 --- a/src/internal/godebugs/table.go +++ b/src/internal/godebugs/table.go @@ -42,6 +42,7 @@ var All = []Info{ {Name: "http2client", Package: "net/http"}, {Name: "http2debug", Package: "net/http", Opaque: true}, {Name: "http2server", Package: "net/http"}, + {Name: "httpcookiemaxnum", Package: "net/http", Changed: 24, Old: "0"}, {Name: "httplaxcontentlength", Package: "net/http", Changed: 22, Old: "1"}, {Name: "httpmuxgo121", Package: "net/http", Changed: 22, Old: "1"}, {Name: "httpservecontentkeepheaders", Package: "net/http", Changed: 23, Old: "1"}, @@ -66,6 +67,7 @@ var All = []Info{ {Name: "tlssha1", Package: "crypto/tls", Changed: 25, Old: "1"}, {Name: "tlsunsafeekm", Package: "crypto/tls", Changed: 22, Old: "1"}, {Name: "updatemaxprocs", Package: "runtime", Changed: 25, Old: "0"}, + {Name: "urlstrictcolons", Package: "net/url", Changed: 26, Old: "0"}, {Name: "winreadlinkvolume", Package: "os", Changed: 23, Old: "0"}, {Name: "winsymlink", Package: "os", Changed: 23, Old: "0"}, {Name: "x509keypairleaf", Package: "crypto/tls", Changed: 23, Old: "0"}, @@ -77,6 +79,16 @@ var All = []Info{ {Name: "zipinsecurepath", Package: "archive/zip"}, } +type RemovedInfo struct { + Name string // name of the removed GODEBUG setting. + Removed int // minor version of Go, when the removal happened +} + +// Removed contains all GODEBUGs that we have removed. +var Removed = []RemovedInfo{ + {Name: "x509sha1", Removed: 24}, +} + // Lookup returns the Info with the given name. func Lookup(name string) *Info { // binary search, avoiding import of sort. diff --git a/src/internal/goexperiment/exp_runtimefree_off.go b/src/internal/goexperiment/exp_runtimefree_off.go new file mode 100644 index 00000000000..3affe434f2f --- /dev/null +++ b/src/internal/goexperiment/exp_runtimefree_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.runtimefree + +package goexperiment + +const RuntimeFree = false +const RuntimeFreeInt = 0 diff --git a/src/internal/goexperiment/exp_runtimefree_on.go b/src/internal/goexperiment/exp_runtimefree_on.go new file mode 100644 index 00000000000..176278b5425 --- /dev/null +++ b/src/internal/goexperiment/exp_runtimefree_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. 
+ +//go:build goexperiment.runtimefree + +package goexperiment + +const RuntimeFree = true +const RuntimeFreeInt = 1 diff --git a/src/internal/goexperiment/exp_synctest_off.go b/src/internal/goexperiment/exp_synctest_off.go deleted file mode 100644 index fade13f89ca..00000000000 --- a/src/internal/goexperiment/exp_synctest_off.go +++ /dev/null @@ -1,8 +0,0 @@ -// Code generated by mkconsts.go. DO NOT EDIT. - -//go:build !goexperiment.synctest - -package goexperiment - -const Synctest = false -const SynctestInt = 0 diff --git a/src/internal/goexperiment/exp_synctest_on.go b/src/internal/goexperiment/exp_synctest_on.go deleted file mode 100644 index 9c44be72761..00000000000 --- a/src/internal/goexperiment/exp_synctest_on.go +++ /dev/null @@ -1,8 +0,0 @@ -// Code generated by mkconsts.go. DO NOT EDIT. - -//go:build goexperiment.synctest - -package goexperiment - -const Synctest = true -const SynctestInt = 1 diff --git a/src/internal/goexperiment/flags.go b/src/internal/goexperiment/flags.go index f8c9b637f3d..da6a6b53ad2 100644 --- a/src/internal/goexperiment/flags.go +++ b/src/internal/goexperiment/flags.go @@ -100,9 +100,6 @@ type Flags struct { // inlining phase within the Go compiler. NewInliner bool - // Synctest enables the testing/synctest package. - Synctest bool - // Dwarf5 enables DWARF version 5 debug info generation. Dwarf5 bool @@ -116,6 +113,9 @@ type Flags struct { // platforms. RandomizedHeapBase64 bool + // RuntimeFree enables the runtime to free and reuse memory more eagerly in some circumstances with compiler help. + RuntimeFree bool + // SizeSpecializedMalloc enables malloc implementations that are specialized per size class. SizeSpecializedMalloc bool diff --git a/src/internal/goroot/gc.go b/src/internal/goroot/gc.go index 133d0763913..534ad57e709 100644 --- a/src/internal/goroot/gc.go +++ b/src/internal/goroot/gc.go @@ -77,8 +77,8 @@ func (gd *gccgoDirs) init() { const prefix = "libraries: =" var dirs []string for _, dirEntry := range dirsEntries { - if strings.HasPrefix(dirEntry, prefix) { - dirs = filepath.SplitList(strings.TrimPrefix(dirEntry, prefix)) + if after, ok := strings.CutPrefix(dirEntry, prefix); ok { + dirs = filepath.SplitList(after) break } } diff --git a/src/internal/itoa/itoa.go b/src/internal/itoa/itoa.go deleted file mode 100644 index 4340ae0e2d5..00000000000 --- a/src/internal/itoa/itoa.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Simple conversions to avoid depending on strconv. - -package itoa - -// Itoa converts val to a decimal string. -func Itoa(val int) string { - if val < 0 { - return "-" + Uitoa(uint(-val)) - } - return Uitoa(uint(val)) -} - -// Uitoa converts val to a decimal string. -func Uitoa(val uint) string { - if val == 0 { // avoid string allocation - return "0" - } - var buf [20]byte // big enough for 64bit value base 10 - i := len(buf) - 1 - for val >= 10 { - q := val / 10 - buf[i] = byte('0' + val - q*10) - i-- - val = q - } - // val < 10 - buf[i] = byte('0' + val) - return string(buf[i:]) -} - -const hex = "0123456789abcdef" - -// Uitox converts val (a uint) to a hexadecimal string. 
-func Uitox(val uint) string { - if val == 0 { // avoid string allocation - return "0x0" - } - var buf [20]byte // big enough for 64bit value base 16 + 0x - i := len(buf) - 1 - for val >= 16 { - q := val / 16 - buf[i] = hex[val%16] - i-- - val = q - } - // val < 16 - buf[i] = hex[val%16] - i-- - buf[i] = 'x' - i-- - buf[i] = '0' - return string(buf[i:]) -} diff --git a/src/internal/itoa/itoa_test.go b/src/internal/itoa/itoa_test.go deleted file mode 100644 index 8bed8885323..00000000000 --- a/src/internal/itoa/itoa_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package itoa_test - -import ( - "fmt" - "internal/itoa" - "math" - "testing" -) - -var ( - minInt64 int64 = math.MinInt64 - maxInt64 int64 = math.MaxInt64 - maxUint64 uint64 = math.MaxUint64 -) - -func TestItoa(t *testing.T) { - tests := []int{int(minInt64), math.MinInt32, -999, -100, -1, 0, 1, 100, 999, math.MaxInt32, int(maxInt64)} - for _, tt := range tests { - got := itoa.Itoa(tt) - want := fmt.Sprint(tt) - if want != got { - t.Fatalf("Itoa(%d) = %s, want %s", tt, got, want) - } - } -} - -func TestUitoa(t *testing.T) { - tests := []uint{0, 1, 100, 999, math.MaxUint32, uint(maxUint64)} - for _, tt := range tests { - got := itoa.Uitoa(tt) - want := fmt.Sprint(tt) - if want != got { - t.Fatalf("Uitoa(%d) = %s, want %s", tt, got, want) - } - } -} - -func TestUitox(t *testing.T) { - tests := []uint{0, 1, 15, 100, 999, math.MaxUint32, uint(maxUint64)} - for _, tt := range tests { - got := itoa.Uitox(tt) - want := fmt.Sprintf("%#x", tt) - if want != got { - t.Fatalf("Uitox(%x) = %s, want %s", tt, got, want) - } - } -} diff --git a/src/internal/platform/supported.go b/src/internal/platform/supported.go index f9706a6988e..778d727086a 100644 --- a/src/internal/platform/supported.go +++ b/src/internal/platform/supported.go @@ -89,11 +89,6 @@ func MustLinkExternal(goos, goarch string, withCgo bool) bool { // Internally linking cgo is incomplete on some architectures. // https://go.dev/issue/14449 return true - case "arm64": - if goos == "windows" { - // windows/arm64 internal linking is not implemented. - return true - } case "ppc64": // Big Endian PPC64 cgo internal linking is not implemented for aix or linux. // https://go.dev/issue/8912 diff --git a/src/internal/poll/fd_io_plan9.go b/src/internal/poll/fd_io_plan9.go index 3205ac8513e..ab9ae13eb45 100644 --- a/src/internal/poll/fd_io_plan9.go +++ b/src/internal/poll/fd_io_plan9.go @@ -5,7 +5,7 @@ package poll import ( - "internal/itoa" + "internal/strconv" "runtime" "sync" "syscall" @@ -72,7 +72,7 @@ func (aio *asyncIO) Cancel() { if aio.pid == -1 { return } - f, e := syscall.Open("/proc/"+itoa.Itoa(aio.pid)+"/note", syscall.O_WRONLY) + f, e := syscall.Open("/proc/"+strconv.Itoa(aio.pid)+"/note", syscall.O_WRONLY) if e != nil { return } diff --git a/src/internal/poll/fd_unix.go b/src/internal/poll/fd_unix.go index 31e6e21120f..f56173524d7 100644 --- a/src/internal/poll/fd_unix.go +++ b/src/internal/poll/fd_unix.go @@ -7,7 +7,7 @@ package poll import ( - "internal/itoa" + "internal/strconv" "internal/syscall/unix" "io" "sync/atomic" @@ -379,7 +379,7 @@ func (fd *FD) Write(p []byte) (int, error) { // If we don't check this we will panic // with slice bounds out of range. // Use a more informative panic. 
- panic("invalid return from write: got " + itoa.Itoa(n) + " from a write of " + itoa.Itoa(max-nn)) + panic("invalid return from write: got " + strconv.Itoa(n) + " from a write of " + strconv.Itoa(max-nn)) } nn += n } diff --git a/src/internal/profile/proto.go b/src/internal/profile/proto.go index 58ff0ad2e07..ad6f621f883 100644 --- a/src/internal/profile/proto.go +++ b/src/internal/profile/proto.go @@ -24,6 +24,7 @@ package profile import ( "errors" "fmt" + "slices" ) type buffer struct { @@ -175,6 +176,16 @@ func le32(p []byte) uint32 { return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 } +func peekNumVarints(data []byte) (numVarints int) { + for ; len(data) > 0; numVarints++ { + var err error + if _, data, err = decodeVarint(data); err != nil { + break + } + } + return numVarints +} + func decodeVarint(data []byte) (uint64, []byte, error) { var i int var u uint64 @@ -275,6 +286,9 @@ func decodeInt64(b *buffer, x *int64) error { func decodeInt64s(b *buffer, x *[]int64) error { if b.typ == 2 { // Packed encoding + dataLen := peekNumVarints(b.data) + *x = slices.Grow(*x, dataLen) + data := b.data for len(data) > 0 { var u uint64 @@ -305,8 +319,11 @@ func decodeUint64(b *buffer, x *uint64) error { func decodeUint64s(b *buffer, x *[]uint64) error { if b.typ == 2 { - data := b.data // Packed encoding + dataLen := peekNumVarints(b.data) + *x = slices.Grow(*x, dataLen) + + data := b.data for len(data) > 0 { var u uint64 var err error diff --git a/src/internal/reflectlite/reflect_mirror_test.go b/src/internal/reflectlite/reflect_mirror_test.go index 8d136415163..c5642d092d9 100644 --- a/src/internal/reflectlite/reflect_mirror_test.go +++ b/src/internal/reflectlite/reflect_mirror_test.go @@ -101,7 +101,6 @@ func TestMirrorWithReflect(t *testing.T) { {".", "reflectlite", rl}, {reflectDir, "reflect", r}, } { - tc := tc wg.Add(1) go func() { defer wg.Done() diff --git a/src/internal/runtime/cgobench/bench_test.go b/src/internal/runtime/cgobench/bench_test.go index b4d8efec5ef..3b8f9a8ca3a 100644 --- a/src/internal/runtime/cgobench/bench_test.go +++ b/src/internal/runtime/cgobench/bench_test.go @@ -24,3 +24,17 @@ func BenchmarkCgoCallParallel(b *testing.B) { } }) } + +func BenchmarkCgoCallWithCallback(b *testing.B) { + for b.Loop() { + cgobench.Callback() + } +} + +func BenchmarkCgoCallParallelWithCallback(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + cgobench.Callback() + } + }) +} diff --git a/src/internal/runtime/cgobench/funcs.go b/src/internal/runtime/cgobench/funcs.go index db685180a1b..91efa512789 100644 --- a/src/internal/runtime/cgobench/funcs.go +++ b/src/internal/runtime/cgobench/funcs.go @@ -9,9 +9,24 @@ package cgobench /* static void empty() { } + +void go_empty_callback(); + +static void callback() { + go_empty_callback(); +} + */ import "C" func Empty() { C.empty() } + +func Callback() { + C.callback() +} + +//export go_empty_callback +func go_empty_callback() { +} diff --git a/src/internal/runtime/cgroup/cgroup_linux.go b/src/internal/runtime/cgroup/cgroup_linux.go index 91815b4a1d0..7b35a9bc187 100644 --- a/src/internal/runtime/cgroup/cgroup_linux.go +++ b/src/internal/runtime/cgroup/cgroup_linux.go @@ -6,8 +6,8 @@ package cgroup import ( "internal/bytealg" - "internal/runtime/strconv" "internal/runtime/syscall/linux" + "internal/strconv" ) var ( @@ -220,8 +220,8 @@ func parseV1Number(buf []byte) (int64, error) { } buf = buf[:i] - val, ok := strconv.Atoi64(string(buf)) - if !ok { + val, err := strconv.ParseInt(string(buf), 10, 
64) + if err != nil { return 0, errMalformedFile } @@ -280,13 +280,13 @@ func parseV2Limit(buf []byte) (float64, bool, error) { } periodStr = periodStr[:i] - quota, ok := strconv.Atoi64(string(quotaStr)) - if !ok { + quota, err := strconv.ParseInt(string(quotaStr), 10, 64) + if err != nil { return 0, false, errMalformedFile } - period, ok := strconv.Atoi64(string(periodStr)) - if !ok { + period, err := strconv.ParseInt(string(periodStr), 10, 64) + if err != nil { return 0, false, errMalformedFile } diff --git a/src/internal/runtime/gc/scan/scan_amd64.s b/src/internal/runtime/gc/scan/scan_amd64.s index 055995fa38c..9b4950a7676 100644 --- a/src/internal/runtime/gc/scan/scan_amd64.s +++ b/src/internal/runtime/gc/scan/scan_amd64.s @@ -86,7 +86,24 @@ loop: // Collect just the pointers from the greyed objects into the scan buffer, // i.e., copy the word indices in the mask from Z1 into contiguous memory. - VPCOMPRESSQ Z1, K1, (DI)(DX*8) + // + // N.B. VPCOMPRESSQ supports a memory destination. Unfortunately, on + // AMD Genoa / Zen 4, using VPCOMPRESSQ with a memory destination + // imposes a severe performance penalty of around an order of magnitude + // compared to a register destination. + // + // This workaround is unfortunate on other microarchitectures, where a + // memory destination is slightly faster than adding an additional move + // instruction, but no where near an order of magnitude. It would be + // nice to have a Genoa-only variant here. + // + // AMD Turin / Zen 5 fixes this issue. + // + // See + // https://lemire.me/blog/2025/02/14/avx-512-gotcha-avoid-compressing-words-to-memory-with-amd-zen-4-processors/. + VPCOMPRESSQ Z1, K1, Z2 + VMOVDQU64 Z2, (DI)(DX*8) + // Advance the scan buffer position by the number of pointers. MOVBQZX 128(AX), CX ADDQ CX, DX diff --git a/src/internal/runtime/gc/scan/scan_test.go b/src/internal/runtime/gc/scan/scan_test.go index 14a0f6f7f48..1208783b6f7 100644 --- a/src/internal/runtime/gc/scan/scan_test.go +++ b/src/internal/runtime/gc/scan/scan_test.go @@ -114,7 +114,8 @@ func benchmarkScanSpanPackedAllSizeClasses(b *testing.B, nPages int) { if sc == 0 { continue } - if sc >= gc.MinSizeForMallocHeader { + size := gc.SizeClassToSize[sc] + if size >= gc.MinSizeForMallocHeader { break } b.Run(fmt.Sprintf("sizeclass=%d", sc), func(b *testing.B) { diff --git a/src/internal/runtime/maps/map.go b/src/internal/runtime/maps/map.go index 865a3f36c20..515558a94f6 100644 --- a/src/internal/runtime/maps/map.go +++ b/src/internal/runtime/maps/map.go @@ -245,8 +245,12 @@ type Map struct { clearSeq uint64 } +// Use 64-bit hash on 64-bit systems, except on Wasm, where we use +// 32-bit hash (see runtime/hash32.go). +const Use64BitHash = goarch.PtrSize == 8 && goarch.IsWasm == 0 + func depthToShift(depth uint8) uint8 { - if goarch.PtrSize == 4 { + if !Use64BitHash { return 32 - depth } return 64 - depth diff --git a/src/internal/runtime/maps/table.go b/src/internal/runtime/maps/table.go index 73ea76766ba..fbce099655e 100644 --- a/src/internal/runtime/maps/table.go +++ b/src/internal/runtime/maps/table.go @@ -6,7 +6,6 @@ package maps import ( "internal/abi" - "internal/goarch" "internal/runtime/math" "unsafe" ) @@ -716,7 +715,7 @@ func (it *Iter) Key() unsafe.Pointer { return it.key } -// Key returns a pointer to the current element. nil indicates end of +// Elem returns a pointer to the current element. nil indicates end of // iteration. // // Must not be called prior to Next. 
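The new Use64BitHash constant above replaces direct goarch.PtrSize checks so that Wasm, although a 64-bit target, can keep its 32-bit hash. A small self-contained sketch of the depth-to-shift arithmetic it gates, with the constant inlined for illustration:

	package main

	import "fmt"

	const use64BitHash = true // stands in for maps.Use64BitHash on 64-bit, non-Wasm targets

	// depthToShift mirrors the function above: a directory of depth d
	// selects a table by the top d bits of the hash.
	func depthToShift(depth uint8) uint8 {
		if !use64BitHash {
			return 32 - depth
		}
		return 64 - depth
	}

	func main() {
		fmt.Println(depthToShift(3)) // 61: shifting right by 61 keeps the top 3 bits
	}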
@@ -1170,7 +1169,7 @@ func (t *table) rehash(typ *abi.MapType, m *Map) { // Bitmask for the last selection bit at this depth. func localDepthMask(localDepth uint8) uintptr { - if goarch.PtrSize == 4 { + if !Use64BitHash { return uintptr(1) << (32 - localDepth) } return uintptr(1) << (64 - localDepth) diff --git a/src/internal/runtime/strconv/atoi.go b/src/internal/runtime/strconv/atoi.go deleted file mode 100644 index 0308757c6f7..00000000000 --- a/src/internal/runtime/strconv/atoi.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package strconv - -import ( - "internal/runtime/math" -) - -// Atoi64 parses an int64 from a string s. -// The bool result reports whether s is a number -// representable by a value of type int64. -func Atoi64(s string) (int64, bool) { - if s == "" { - return 0, false - } - - neg := false - if s[0] == '-' { - neg = true - s = s[1:] - } - - un := uint64(0) - for i := 0; i < len(s); i++ { - c := s[i] - if c < '0' || c > '9' { - return 0, false - } - if un > math.MaxUint64/10 { - // overflow - return 0, false - } - un *= 10 - un1 := un + uint64(c) - '0' - if un1 < un { - // overflow - return 0, false - } - un = un1 - } - - if !neg && un > uint64(math.MaxInt64) { - return 0, false - } - if neg && un > uint64(math.MaxInt64)+1 { - return 0, false - } - - n := int64(un) - if neg { - n = -n - } - - return n, true -} - -// Atoi is like Atoi64 but for integers -// that fit into an int. -func Atoi(s string) (int, bool) { - if n, ok := Atoi64(s); n == int64(int(n)) { - return int(n), ok - } - return 0, false -} - -// Atoi32 is like Atoi but for integers -// that fit into an int32. -func Atoi32(s string) (int32, bool) { - if n, ok := Atoi64(s); n == int64(int32(n)) { - return int32(n), ok - } - return 0, false -} diff --git a/src/internal/runtime/strconv/atoi_test.go b/src/internal/runtime/strconv/atoi_test.go deleted file mode 100644 index 71a8030b1d0..00000000000 --- a/src/internal/runtime/strconv/atoi_test.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package strconv_test - -import ( - "internal/runtime/strconv" - "testing" -) - -const intSize = 32 << (^uint(0) >> 63) - -type atoi64Test struct { - in string - out int64 - ok bool -} - -var atoi64tests = []atoi64Test{ - {"", 0, false}, - {"0", 0, true}, - {"-0", 0, true}, - {"1", 1, true}, - {"-1", -1, true}, - {"12345", 12345, true}, - {"-12345", -12345, true}, - {"012345", 12345, true}, - {"-012345", -12345, true}, - {"12345x", 0, false}, - {"-12345x", 0, false}, - {"98765432100", 98765432100, true}, - {"-98765432100", -98765432100, true}, - {"20496382327982653440", 0, false}, - {"-20496382327982653440", 0, false}, - {"9223372036854775807", 1<<63 - 1, true}, - {"-9223372036854775807", -(1<<63 - 1), true}, - {"9223372036854775808", 0, false}, - {"-9223372036854775808", -1 << 63, true}, - {"9223372036854775809", 0, false}, - {"-9223372036854775809", 0, false}, -} - -func TestAtoi(t *testing.T) { - switch intSize { - case 32: - for i := range atoi32tests { - test := &atoi32tests[i] - out, ok := strconv.Atoi(test.in) - if test.out != int32(out) || test.ok != ok { - t.Errorf("Atoi(%q) = (%v, %v) want (%v, %v)", - test.in, out, ok, test.out, test.ok) - } - } - case 64: - for i := range atoi64tests { - test := &atoi64tests[i] - out, ok := strconv.Atoi(test.in) - if test.out != int64(out) || test.ok != ok { - t.Errorf("Atoi(%q) = (%v, %v) want (%v, %v)", - test.in, out, ok, test.out, test.ok) - } - } - } -} - -type atoi32Test struct { - in string - out int32 - ok bool -} - -var atoi32tests = []atoi32Test{ - {"", 0, false}, - {"0", 0, true}, - {"-0", 0, true}, - {"1", 1, true}, - {"-1", -1, true}, - {"12345", 12345, true}, - {"-12345", -12345, true}, - {"012345", 12345, true}, - {"-012345", -12345, true}, - {"12345x", 0, false}, - {"-12345x", 0, false}, - {"987654321", 987654321, true}, - {"-987654321", -987654321, true}, - {"2147483647", 1<<31 - 1, true}, - {"-2147483647", -(1<<31 - 1), true}, - {"2147483648", 0, false}, - {"-2147483648", -1 << 31, true}, - {"2147483649", 0, false}, - {"-2147483649", 0, false}, -} - -func TestAtoi32(t *testing.T) { - for i := range atoi32tests { - test := &atoi32tests[i] - out, ok := strconv.Atoi32(test.in) - if test.out != out || test.ok != ok { - t.Errorf("Atoi32(%q) = (%v, %v) want (%v, %v)", - test.in, out, ok, test.out, test.ok) - } - } -} diff --git a/src/internal/runtime/sys/intrinsics.go b/src/internal/runtime/sys/intrinsics.go index 147d5581f25..69609192ce9 100644 --- a/src/internal/runtime/sys/intrinsics.go +++ b/src/internal/runtime/sys/intrinsics.go @@ -109,7 +109,7 @@ func Len64(x uint64) (n int) { x >>= 8 n += 8 } - return n + int(len8tab[x]) + return n + int(len8tab[uint8(x)]) } // --- OnesCount --- diff --git a/src/internal/runtime/wasitest/testdata/tcpecho.go b/src/internal/runtime/wasitest/testdata/tcpecho.go index 819e3526885..6da56acba10 100644 --- a/src/internal/runtime/wasitest/testdata/tcpecho.go +++ b/src/internal/runtime/wasitest/testdata/tcpecho.go @@ -62,8 +62,7 @@ func findListener() (net.Listener, error) { l, err := net.FileListener(f) f.Close() - var se syscall.Errno - switch errors.As(err, &se); se { + switch se, _ := errors.AsType[syscall.Errno](err); se { case syscall.ENOTSOCK: continue case syscall.EBADF: diff --git a/src/internal/singleflight/singleflight_test.go b/src/internal/singleflight/singleflight_test.go index 279e1beda1b..0cce6a7422b 100644 --- a/src/internal/singleflight/singleflight_test.go +++ b/src/internal/singleflight/singleflight_test.go @@ -97,7 +97,7 @@ func TestForgetUnshared(t *testing.T) { key := "key" 
firstCh := make(chan struct{}) go func() { - g.Do(key, func() (i interface{}, e error) { + g.Do(key, func() (i any, e error) { firstStarted.Done() <-firstCh return @@ -110,7 +110,7 @@ func TestForgetUnshared(t *testing.T) { secondCh := make(chan struct{}) go func() { - g.Do(key, func() (i interface{}, e error) { + g.Do(key, func() (i any, e error) { // Notify that we started secondCh <- struct{}{} <-secondCh @@ -120,7 +120,7 @@ func TestForgetUnshared(t *testing.T) { <-secondCh - resultCh := g.DoChan(key, func() (i interface{}, e error) { + resultCh := g.DoChan(key, func() (i any, e error) { panic("third must not be started") }) @@ -155,7 +155,7 @@ func TestDoAndForgetUnsharedRace(t *testing.T) { wg.Add(n) for i := 0; i < n; i++ { go func() { - g.Do(key, func() (interface{}, error) { + g.Do(key, func() (any, error) { time.Sleep(d) return calls.Add(1), nil }) diff --git a/src/strconv/atob.go b/src/internal/strconv/atob.go similarity index 95% rename from src/strconv/atob.go rename to src/internal/strconv/atob.go index 0a495008d77..cbeba7f8bc9 100644 --- a/src/strconv/atob.go +++ b/src/internal/strconv/atob.go @@ -14,7 +14,7 @@ func ParseBool(str string) (bool, error) { case "0", "f", "F", "false", "FALSE", "False": return false, nil } - return false, syntaxError("ParseBool", str) + return false, ErrSyntax } // FormatBool returns "true" or "false" according to the value of b. diff --git a/src/strconv/atob_test.go b/src/internal/strconv/atob_test.go similarity index 72% rename from src/strconv/atob_test.go rename to src/internal/strconv/atob_test.go index 40d43a9f8f9..61f543df308 100644 --- a/src/strconv/atob_test.go +++ b/src/internal/strconv/atob_test.go @@ -6,7 +6,7 @@ package strconv_test import ( "bytes" - . "strconv" + . "internal/strconv" "testing" ) @@ -36,23 +36,8 @@ var atobtests = []atobTest{ func TestParseBool(t *testing.T) { for _, test := range atobtests { b, e := ParseBool(test.in) - if test.err != nil { - // expect an error - if e == nil { - t.Errorf("ParseBool(%s) = nil; want %s", test.in, test.err) - } else { - // NumError assertion must succeed; it's the only thing we return. - if e.(*NumError).Err != test.err { - t.Errorf("ParseBool(%s) = %s; want %s", test.in, e, test.err) - } - } - } else { - if e != nil { - t.Errorf("ParseBool(%s) = %s; want nil", test.in, e) - } - if b != test.out { - t.Errorf("ParseBool(%s) = %t; want %t", test.in, b, test.out) - } + if b != test.out || e != test.err { + t.Errorf("ParseBool(%s) = %v, %v, want %v, %v", test.in, b, e, test.out, test.err) } } } diff --git a/src/strconv/atoc.go b/src/internal/strconv/atoc.go similarity index 80% rename from src/strconv/atoc.go rename to src/internal/strconv/atoc.go index 560bd7920df..52f2fc82af3 100644 --- a/src/strconv/atoc.go +++ b/src/internal/strconv/atoc.go @@ -4,23 +4,6 @@ package strconv -import "internal/stringslite" - -const fnParseComplex = "ParseComplex" - -// convErr splits an error returned by parseFloatPrefix -// into a syntax or range error for ParseComplex. -func convErr(err error, s string) (syntax, range_ error) { - if x, ok := err.(*NumError); ok { - x.Func = fnParseComplex - x.Num = stringslite.Clone(s) - if x.Err == ErrRange { - return nil, x - } - } - return err, nil -} - // ParseComplex converts the string s to a complex number // with the precision specified by bitSize: 64 for complex64, or 128 for complex128. 
// When bitSize=64, the result still has type complex128, but it will be @@ -47,8 +30,6 @@ func ParseComplex(s string, bitSize int) (complex128, error) { size = 32 // complex64 uses float32 parts } - orig := s - // Remove parentheses, if any. if len(s) >= 2 && s[0] == '(' && s[len(s)-1] == ')' { s = s[1 : len(s)-1] @@ -59,10 +40,10 @@ func ParseComplex(s string, bitSize int) (complex128, error) { // Read real part (possibly imaginary part if followed by 'i'). re, n, err := parseFloatPrefix(s, size) if err != nil { - err, pending = convErr(err, orig) - if err != nil { + if err != ErrRange { return 0, err } + pending = err } s = s[n:] @@ -88,20 +69,20 @@ func ParseComplex(s string, bitSize int) (complex128, error) { } fallthrough default: - return 0, syntaxError(fnParseComplex, orig) + return 0, ErrSyntax } // Read imaginary part. im, n, err := parseFloatPrefix(s, size) if err != nil { - err, pending = convErr(err, orig) - if err != nil { + if err != ErrRange { return 0, err } + pending = err } s = s[n:] if s != "i" { - return 0, syntaxError(fnParseComplex, orig) + return 0, ErrSyntax } return complex(re, im), pending } diff --git a/src/strconv/atoc_test.go b/src/internal/strconv/atoc_test.go similarity index 88% rename from src/strconv/atoc_test.go rename to src/internal/strconv/atoc_test.go index 4c1aad09000..0a7741a4ccf 100644 --- a/src/strconv/atoc_test.go +++ b/src/internal/strconv/atoc_test.go @@ -7,8 +7,7 @@ package strconv_test import ( "math" "math/cmplx" - "reflect" - . "strconv" + . "internal/strconv" "testing" ) @@ -188,30 +187,24 @@ func TestParseComplex(t *testing.T) { } for i := range tests { test := &tests[i] - if test.err != nil { - test.err = &NumError{Func: "ParseComplex", Num: test.in, Err: test.err} + c, e := ParseComplex(test.in, 128) + if !sameComplex(c, test.out) || e != test.err { + t.Errorf("ParseComplex(%s, 128) = %v, %v, want %v, %v", test.in, c, e, test.out, test.err) } - got, err := ParseComplex(test.in, 128) - if !reflect.DeepEqual(err, test.err) { - t.Fatalf("ParseComplex(%q, 128) = %v, %v; want %v, %v", test.in, got, err, test.out, test.err) - } - if !(cmplx.IsNaN(test.out) && cmplx.IsNaN(got)) && got != test.out { - t.Fatalf("ParseComplex(%q, 128) = %v, %v; want %v, %v", test.in, got, err, test.out, test.err) - } - if complex128(complex64(test.out)) == test.out { - got, err := ParseComplex(test.in, 64) - if !reflect.DeepEqual(err, test.err) { - t.Fatalf("ParseComplex(%q, 64) = %v, %v; want %v, %v", test.in, got, err, test.out, test.err) - } - got64 := complex64(got) - if complex128(got64) != test.out { - t.Fatalf("ParseComplex(%q, 64) = %v, %v; want %v, %v", test.in, got, err, test.out, test.err) + c, e := ParseComplex(test.in, 64) + c64 := complex64(c) + if !sameComplex(complex128(c64) , test.out) || e != test.err { + t.Errorf("ParseComplex(%s, 64) = %v, %v, want %v, %v", test.in, c, e, test.out, test.err) } } } } +func sameComplex(c1, c2 complex128) bool { + return cmplx.IsNaN(c1) && cmplx.IsNaN(c2) || c1 == c2 +} + // Issue 42297: allow ParseComplex(s, not_32_or_64) for legacy reasons func TestParseComplexIncorrectBitSize(t *testing.T) { const s = "1.5e308+1.0e307i" diff --git a/src/strconv/atof.go b/src/internal/strconv/atof.go similarity index 95% rename from src/strconv/atof.go rename to src/internal/strconv/atof.go index fbbd84deb35..ada45dc0aa9 100644 --- a/src/strconv/atof.go +++ b/src/internal/strconv/atof.go @@ -10,8 +10,6 @@ package strconv // 2) Multiply/divide decimal by powers of two until in range [0.5, 1) // 3) Multiply by 2^precision and 
round to get mantissa.
 
-import "math"
-
 var optimize = true // set to false to force slow-path conversions for testing
 
 // commonPrefixLenIgnoreCase returns the length of the common
@@ -58,11 +56,11 @@ func special(s string) (f float64, n int, ok bool) {
 			n = 3
 		}
 		if n == 3 || n == 8 {
-			return math.Inf(sign), nsign + n, true
+			return inf(sign), nsign + n, true
 		}
 	case 'n', 'N':
 		if commonPrefixLenIgnoreCase(s, "nan") == 3 {
-			return math.NaN(), 3, true
+			return nan(), 3, true
 		}
 	}
 	return 0, 0, false
@@ -460,7 +458,7 @@ func atof64exact(mantissa uint64, exp int, neg bool) (f float64, ok bool) {
 // If possible to compute mantissa*10^exp to 32-bit float f exactly,
 // entirely in floating-point math, do so, avoiding the machinery above.
 func atof32exact(mantissa uint64, exp int, neg bool) (f float32, ok bool) {
-	if mantissa>>float32info.mantbits != 0 {
+	if mantissa>>float32MantBits != 0 {
 		return
 	}
 	f = float32(mantissa)
@@ -546,7 +544,7 @@ func atofHex(s string, flt *floatInfo, mantissa uint64, exp int, neg, trunc bool
 	if exp > maxExp { // infinity and range error
 		mantissa = 1 << flt.mantbits
 		exp = maxExp + 1
-		err = rangeError(fnParseFloat, s)
+		err = ErrRange
 	}
 
 	bits := mantissa & (1<<flt.mantbits - 1)
diff --git a/src/strconv/atof_test.go b/src/internal/strconv/atof_test.go
rename from src/strconv/atof_test.go
rename to src/internal/strconv/atof_test.go
--- a/src/strconv/atof_test.go
+++ b/src/internal/strconv/atof_test.go
@@ ... @@ func TestParseFloatPrefix(t *testing.T) {
 		for _, suffix := range []string{ /* … */ "<", ">", "(", ")", "i", "init"} {
 			in := test.in + suffix
-			_, n, err := ParseFloatPrefix(in, 64)
+			_, n, err := parseFloatPrefix(in, 64)
 			if err != nil {
 				t.Errorf("ParseFloatPrefix(%q, 64): err = %v; want no error", in, err)
 			}
diff --git a/src/internal/strconv/atofeisel.go b/src/internal/strconv/atofeisel.go
new file mode 100644
index 00000000000..5fa92908b49
--- /dev/null
+++ b/src/internal/strconv/atofeisel.go
@@ -0,0 +1,166 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package strconv
+
+// This file implements the Eisel-Lemire ParseFloat algorithm, published in
+// 2020 and discussed extensively at
+// https://nigeltao.github.io/blog/2020/eisel-lemire.html
+//
+// The original C++ implementation is at
+// https://github.com/lemire/fast_double_parser/blob/644bef4306059d3be01a04e77d3cc84b379c596f/include/fast_double_parser.h#L840
+//
+// This Go re-implementation closely follows the C re-implementation at
+// https://github.com/google/wuffs/blob/ba3818cb6b473a2ed0b38ecfc07dbbd3a97e8ae7/internal/cgen/base/floatconv-submodule-code.c#L990
+//
+// Additional testing (on over several million test strings) is done by
+// https://github.com/nigeltao/parse-number-fxx-test-data/blob/5280dcfccf6d0b02a65ae282dad0b6d9de50e039/script/test-go-strconv.go
+
+import (
+	"math/bits"
+)
+
+func eiselLemire64(man uint64, exp10 int, neg bool) (f float64, ok bool) {
+	// The terse comments in this function body refer to sections of the
+	// https://nigeltao.github.io/blog/2020/eisel-lemire.html blog post.
+
+	// Exp10 Range.
+	if man == 0 {
+		if neg {
+			f = float64frombits(0x8000000000000000) // Negative zero.
+		}
+		return f, true
+	}
+	pow, exp2, ok := pow10(exp10)
+	if !ok {
+		return 0, false
+	}
+
+	// Normalization.
+	clz := bits.LeadingZeros64(man)
+	man <<= uint(clz)
+	retExp2 := uint64(exp2+63-float64Bias) - uint64(clz)
+
+	// Multiplication.
+	xHi, xLo := bits.Mul64(man, pow.Hi)
+
+	// Wider Approximation.
+	if xHi&0x1FF == 0x1FF && xLo+man < man {
+		yHi, yLo := bits.Mul64(man, pow.Lo)
+		mergedHi, mergedLo := xHi, xLo+yHi
+		if mergedLo < xLo {
+			mergedHi++
+		}
+		if mergedHi&0x1FF == 0x1FF && mergedLo+1 == 0 && yLo+man < man {
+			return 0, false
+		}
+		xHi, xLo = mergedHi, mergedLo
+	}
+
+	// Shifting to 54 Bits.
+	msb := xHi >> 63
+	retMantissa := xHi >> (msb + 9)
+	retExp2 -= 1 ^ msb
+
+	// Half-way Ambiguity.
+	if xLo == 0 && xHi&0x1FF == 0 && retMantissa&3 == 1 {
+		return 0, false
+	}
+
+	// From 54 to 53 Bits.
+	retMantissa += retMantissa & 1
+	retMantissa >>= 1
+	if retMantissa>>53 > 0 {
+		retMantissa >>= 1
+		retExp2 += 1
+	}
+	// retExp2 is a uint64. Zero or underflow means that we're in subnormal
+	// float64 space. 0x7FF or above means that we're in Inf/NaN float64 space.
+	//
+	// The if block is equivalent to (but has fewer branches than):
+	// if retExp2 <= 0 || retExp2 >= 0x7FF { etc }
+	if retExp2-1 >= 0x7FF-1 {
+		return 0, false
+	}
+	retBits := retExp2<<52 | retMantissa&0x000FFFFFFFFFFFFF
+	if neg {
+		retBits |= 0x8000000000000000
+	}
+	return float64frombits(retBits), true
+}
+
+// eiselLemire32 is the float32 flavor of eiselLemire64
+// (8 exponent bits with a -127 bias, 23 mantissa bits).
+func eiselLemire32(man uint64, exp10 int, neg bool) (f float32, ok bool) {
+	// Exp10 Range.
+	if man == 0 {
+		if neg {
+			f = float32frombits(0x80000000) // Negative zero.
+		}
+		return f, true
+	}
+	pow, exp2, ok := pow10(exp10)
+	if !ok {
+		return 0, false
+	}
+
+	// Normalization.
+	clz := bits.LeadingZeros64(man)
+	man <<= uint(clz)
+	retExp2 := uint64(exp2+63-float32Bias) - uint64(clz)
+
+	// Multiplication.
+	xHi, xLo := bits.Mul64(man, pow.Hi)
+
+	// Wider Approximation.
+	if xHi&0x3FFFFFFFFF == 0x3FFFFFFFFF && xLo+man < man {
+		yHi, yLo := bits.Mul64(man, pow.Lo)
+		mergedHi, mergedLo := xHi, xLo+yHi
+		if mergedLo < xLo {
+			mergedHi++
+		}
+		if mergedHi&0x3FFFFFFFFF == 0x3FFFFFFFFF && mergedLo+1 == 0 && yLo+man < man {
+			return 0, false
+		}
+		xHi, xLo = mergedHi, mergedLo
+	}
+
+	// Shifting to 25 Bits.
+	msb := xHi >> 63
+	retMantissa := xHi >> (msb + 38)
+	retExp2 -= 1 ^ msb
+
+	// Half-way Ambiguity.
+	if xLo == 0 && xHi&0x3FFFFFFFFF == 0 && retMantissa&3 == 1 {
+		return 0, false
+	}
+
+	// From 54 to 53 Bits (and for float32, it's from 25 to 24 bits).
+	retMantissa += retMantissa & 1
+	retMantissa >>= 1
+	if retMantissa>>24 > 0 {
+		retMantissa >>= 1
+		retExp2 += 1
+	}
+	// retExp2 is a uint64. Zero or underflow means that we're in subnormal
+	// float32 space. 0xFF or above means that we're in Inf/NaN float32 space.
+	//
+	// The if block is equivalent to (but has fewer branches than):
+	// if retExp2 <= 0 || retExp2 >= 0xFF { etc }
+	if retExp2-1 >= 0xFF-1 {
+		return 0, false
+	}
+	retBits := retExp2<<23 | retMantissa&0x007FFFFF
+	if neg {
+		retBits |= 0x80000000
+	}
+	return float32frombits(uint32(retBits)), true
+}
diff --git a/src/strconv/atoi.go b/src/internal/strconv/atoi.go
rename from src/strconv/atoi.go
rename to src/internal/strconv/atoi.go
--- a/src/strconv/atoi.go
+++ b/src/internal/strconv/atoi.go
@@ -65,8 +41,6 @@ const intSize = 32 << (^uint(0) >> 63)
 
 // IntSize is the size in bits of an int or uint value.
 const IntSize = intSize
 
-const maxUint64 = 1<<64 - 1
-
 // ParseUint is like [ParseInt] but for unsigned numbers.
 //
 // A sign prefix is not permitted.
@@ -74,7 +48,7 @@ func ParseUint(s string, base int, bitSize int) (uint64, error) {
 	const fnParseUint = "ParseUint"
 
 	if s == "" {
-		return 0, syntaxError(fnParseUint, s)
+		return 0, ErrSyntax
 	}
 
 	base0 := base == 0
@@ -105,13 +79,13 @@ func ParseUint(s string, base int, bitSize int) (uint64, error) {
 		}
 
 	default:
-		return 0, baseError(fnParseUint, s0, base)
+		return 0, ErrBase
 	}
 
 	if bitSize == 0 {
 		bitSize = IntSize
 	} else if bitSize < 0 || bitSize > 64 {
-		return 0, bitSizeError(fnParseUint, s0, bitSize)
+		return 0, ErrBitSize
 	}
 
 	// Cutoff is the smallest number such that cutoff*base > maxUint64.
@@ -141,29 +115,29 @@ func ParseUint(s string, base int, bitSize int) (uint64, error) {
 		case 'a' <= lower(c) && lower(c) <= 'z':
 			d = lower(c) - 'a' + 10
 		default:
-			return 0, syntaxError(fnParseUint, s0)
+			return 0, ErrSyntax
 		}
 
 		if d >= byte(base) {
-			return 0, syntaxError(fnParseUint, s0)
+			return 0, ErrSyntax
 		}
 
 		if n >= cutoff {
 			// n*base overflows
-			return maxVal, rangeError(fnParseUint, s0)
+			return maxVal, ErrRange
 		}
 		n *= uint64(base)
 
 		n1 := n + uint64(d)
 		if n1 < n || n1 > maxVal {
 			// n+d overflows
-			return maxVal, rangeError(fnParseUint, s0)
+			return maxVal, ErrRange
 		}
 		n = n1
 	}
 
 	if underscores && !underscoreOK(s0) {
-		return 0, syntaxError(fnParseUint, s0)
+		return 0, ErrSyntax
 	}
 
 	return n, nil
@@ -198,11 +172,10 @@ func ParseInt(s string, base int, bitSize int) (i int64, err error) {
 	const fnParseInt = "ParseInt"
 
 	if s == "" {
-		return 0, syntaxError(fnParseInt, s)
+		return 0, ErrSyntax
 	}
 
 	// Pick off leading sign.
-	s0 := s
 	neg := false
 	switch s[0] {
 	case '+':
@@ -215,9 +188,7 @@ func ParseInt(s string, base int, bitSize int) (i int64, err error) {
 	// Convert unsigned and check range.
var un uint64 un, err = ParseUint(s, base, bitSize) - if err != nil && err.(*NumError).Err != ErrRange { - err.(*NumError).Func = fnParseInt - err.(*NumError).Num = stringslite.Clone(s0) + if err != nil && err != ErrRange { return 0, err } @@ -227,10 +198,10 @@ func ParseInt(s string, base int, bitSize int) (i int64, err error) { cutoff := uint64(1 << uint(bitSize-1)) if !neg && un >= cutoff { - return int64(cutoff - 1), rangeError(fnParseInt, s0) + return int64(cutoff - 1), ErrRange } if neg && un > cutoff { - return -int64(cutoff), rangeError(fnParseInt, s0) + return -int64(cutoff), ErrRange } n := int64(un) if neg { @@ -251,7 +222,7 @@ func Atoi(s string) (int, error) { if s[0] == '-' || s[0] == '+' { s = s[1:] if len(s) < 1 { - return 0, syntaxError(fnAtoi, s0) + return 0, ErrSyntax } } @@ -259,7 +230,7 @@ func Atoi(s string) (int, error) { for _, ch := range []byte(s) { ch -= '0' if ch > 9 { - return 0, syntaxError(fnAtoi, s0) + return 0, ErrSyntax } n = n*10 + int(ch) } @@ -271,9 +242,6 @@ func Atoi(s string) (int, error) { // Slow path for invalid, big, or underscored integers. i64, err := ParseInt(s, 10, 0) - if nerr, ok := err.(*NumError); ok { - nerr.Func = fnAtoi - } return int(i64), err } diff --git a/src/strconv/atoi_test.go b/src/internal/strconv/atoi_test.go similarity index 82% rename from src/strconv/atoi_test.go rename to src/internal/strconv/atoi_test.go index d7f8f25a295..e8f123ea5e5 100644 --- a/src/strconv/atoi_test.go +++ b/src/internal/strconv/atoi_test.go @@ -5,10 +5,9 @@ package strconv_test import ( - "errors" "fmt" "reflect" - . "strconv" + . "internal/strconv" "testing" ) @@ -316,47 +315,6 @@ var numErrorTests = []numErrorTest{ {"1\x00.2", `strconv.ParseFloat: parsing "1\x00.2": failed`}, } -func init() { - // The parse routines return NumErrors wrapping - // the error and the string. Convert the tables above. 
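Aside: the internal package drops the *NumError wrapper in favor of comparable sentinel errors, which is why the removed init function above no longer needs to rewrite the test tables. A minimal, self-contained sketch (illustrative only, not this CL's code; all names here are stand-ins) of how an outer API can restore the old error shape on top of bare sentinels:

// Sketch: inner parser returns sentinels; public wrapper rebuilds *NumError.
package main

import (
	"errors"
	"fmt"
)

var (
	ErrSyntax = errors.New("invalid syntax")
	ErrRange  = errors.New("value out of range")
)

// NumError mirrors the shape of strconv.NumError.
type NumError struct {
	Func, Num string
	Err       error
}

func (e *NumError) Error() string {
	return e.Func + ": parsing " + fmt.Sprintf("%q", e.Num) + ": " + e.Err.Error()
}
func (e *NumError) Unwrap() error { return e.Err }

// parseBool is the internal style: bare sentinel errors, no allocation.
func parseBool(s string) (bool, error) {
	switch s {
	case "1", "t", "T", "true", "TRUE", "True":
		return true, nil
	case "0", "f", "F", "false", "FALSE", "False":
		return false, nil
	}
	return false, ErrSyntax
}

// ParseBool is the public style: wrap the sentinel on the way out.
func ParseBool(s string) (bool, error) {
	b, err := parseBool(s)
	if err != nil {
		err = &NumError{"ParseBool", s, err}
	}
	return b, err
}

func main() {
	_, err := ParseBool("maybe")
	// errors.Is still matches the sentinel through Unwrap.
	fmt.Println(err, errors.Is(err, ErrSyntax)) // ... true
}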
-	for i := range parseUint64Tests {
-		test := &parseUint64Tests[i]
-		if test.err != nil {
-			test.err = &NumError{"ParseUint", test.in, test.err}
-		}
-	}
-	for i := range parseUint64BaseTests {
-		test := &parseUint64BaseTests[i]
-		if test.err != nil {
-			test.err = &NumError{"ParseUint", test.in, test.err}
-		}
-	}
-	for i := range parseInt64Tests {
-		test := &parseInt64Tests[i]
-		if test.err != nil {
-			test.err = &NumError{"ParseInt", test.in, test.err}
-		}
-	}
-	for i := range parseInt64BaseTests {
-		test := &parseInt64BaseTests[i]
-		if test.err != nil {
-			test.err = &NumError{"ParseInt", test.in, test.err}
-		}
-	}
-	for i := range parseUint32Tests {
-		test := &parseUint32Tests[i]
-		if test.err != nil {
-			test.err = &NumError{"ParseUint", test.in, test.err}
-		}
-	}
-	for i := range parseInt32Tests {
-		test := &parseInt32Tests[i]
-		if test.err != nil {
-			test.err = &NumError{"ParseInt", test.in, test.err}
-		}
-	}
-}
-
 func TestParseUint32(t *testing.T) {
 	for i := range parseUint32Tests {
 		test := &parseUint32Tests[i]
@@ -475,62 +433,40 @@ func TestAtoi(t *testing.T) {
 		for i := range parseInt32Tests {
 			test := &parseInt32Tests[i]
 			out, err := Atoi(test.in)
-			var testErr error
-			if test.err != nil {
-				testErr = &NumError{"Atoi", test.in, test.err.(*NumError).Err}
-			}
-			if int(test.out) != out || !reflect.DeepEqual(testErr, err) {
-				t.Errorf("Atoi(%q) = %v, %v want %v, %v",
-					test.in, out, err, test.out, testErr)
+			if out != int(test.out) || err != test.err {
+				t.Errorf("Atoi(%q) = %v, %v, want %v, %v", test.in, out, err, test.out, test.err)
 			}
 		}
 	case 64:
 		for i := range parseInt64Tests {
 			test := &parseInt64Tests[i]
 			out, err := Atoi(test.in)
-			var testErr error
-			if test.err != nil {
-				testErr = &NumError{"Atoi", test.in, test.err.(*NumError).Err}
-			}
-			if test.out != int64(out) || !reflect.DeepEqual(testErr, err) {
-				t.Errorf("Atoi(%q) = %v, %v want %v, %v",
-					test.in, out, err, test.out, testErr)
+			if int64(out) != test.out || err != test.err {
+				t.Errorf("Atoi(%q) = %v, %v, want %v, %v", test.in, out, err, test.out, test.err)
 			}
 		}
 	}
 }
 
-func bitSizeErrStub(name string, bitSize int) error {
-	return BitSizeError(name, "0", bitSize)
-}
-
-func baseErrStub(name string, base int) error {
-	return BaseError(name, "0", base)
-}
-
-func noErrStub(name string, arg int) error {
-	return nil
-}
-
 type parseErrorTest struct {
 	arg     int
-	errStub func(name string, arg int) error
+	err     error
 }
 
 var parseBitSizeTests = []parseErrorTest{
-	{-1, bitSizeErrStub},
-	{0, noErrStub},
-	{64, noErrStub},
-	{65, bitSizeErrStub},
+	{-1, ErrBitSize},
+	{0, nil},
+	{64, nil},
+	{65, ErrBitSize},
 }
 
 var parseBaseTests = []parseErrorTest{
-	{-1, baseErrStub},
-	{0, noErrStub},
-	{1, baseErrStub},
-	{2, noErrStub},
-	{36, noErrStub},
-	{37, baseErrStub},
+	{-1, ErrBase},
+	{0, nil},
+	{1, ErrBase},
+	{2, nil},
+	{36, nil},
+	{37, ErrBase},
 }
 
 func equalError(a, b error) bool {
@@ -546,11 +482,10 @@ func equalError(a, b error) bool {
 func TestParseIntBitSize(t *testing.T) {
 	for i := range parseBitSizeTests {
 		test := &parseBitSizeTests[i]
-		testErr := test.errStub("ParseInt", test.arg)
 		_, err := ParseInt("0", 0, test.arg)
-		if !equalError(testErr, err) {
+		if err != test.err {
 			t.Errorf("ParseInt(\"0\", 0, %v) = 0, %v want 0, %v",
-				test.arg, err, testErr)
+				test.arg, err, test.err)
 		}
 	}
 }
@@ -558,11 +493,10 @@ func TestParseIntBitSize(t *testing.T) {
 func TestParseUintBitSize(t *testing.T) {
 	for i := range parseBitSizeTests {
 		test := &parseBitSizeTests[i]
-		testErr := test.errStub("ParseUint", test.arg)
 		_, err := ParseUint("0", 0,
test.arg) - if !equalError(testErr, err) { + if err != test.err { t.Errorf("ParseUint(\"0\", 0, %v) = 0, %v want 0, %v", - test.arg, err, testErr) + test.arg, err, test.err) } } } @@ -570,11 +504,10 @@ func TestParseUintBitSize(t *testing.T) { func TestParseIntBase(t *testing.T) { for i := range parseBaseTests { test := &parseBaseTests[i] - testErr := test.errStub("ParseInt", test.arg) _, err := ParseInt("0", test.arg, 0) - if !equalError(testErr, err) { + if err != test.err { t.Errorf("ParseInt(\"0\", %v, 0) = 0, %v want 0, %v", - test.arg, err, testErr) + test.arg, err, test.err) } } } @@ -582,35 +515,14 @@ func TestParseIntBase(t *testing.T) { func TestParseUintBase(t *testing.T) { for i := range parseBaseTests { test := &parseBaseTests[i] - testErr := test.errStub("ParseUint", test.arg) _, err := ParseUint("0", test.arg, 0) - if !equalError(testErr, err) { + if err != test.err { t.Errorf("ParseUint(\"0\", %v, 0) = 0, %v want 0, %v", - test.arg, err, testErr) + test.arg, err, test.err) } } } -func TestNumError(t *testing.T) { - for _, test := range numErrorTests { - err := &NumError{ - Func: "ParseFloat", - Num: test.num, - Err: errors.New("failed"), - } - if got := err.Error(); got != test.want { - t.Errorf(`(&NumError{"ParseFloat", %q, "failed"}).Error() = %v, want %v`, test.num, got, test.want) - } - } -} - -func TestNumErrorUnwrap(t *testing.T) { - err := &NumError{Err: ErrSyntax} - if !errors.Is(err, ErrSyntax) { - t.Error("errors.Is failed, wanted success") - } -} - func BenchmarkParseInt(b *testing.B) { b.Run("Pos", func(b *testing.B) { benchmarkParseInt(b, 1) diff --git a/src/strconv/ctoa.go b/src/internal/strconv/ctoa.go similarity index 59% rename from src/strconv/ctoa.go rename to src/internal/strconv/ctoa.go index fd7f941d703..89921c856c8 100644 --- a/src/strconv/ctoa.go +++ b/src/internal/strconv/ctoa.go @@ -12,16 +12,29 @@ package strconv // It rounds the result assuming that the original was obtained from a complex // value of bitSize bits, which must be 64 for complex64 and 128 for complex128. func FormatComplex(c complex128, fmt byte, prec, bitSize int) string { + var buf [64]byte + return string(AppendComplex(buf[:0], c, fmt, prec, bitSize)) +} + +// AppendComplex appends the result of FormatComplex to dst. +// It is here for the runtime. +// There is no public strconv.AppendComplex. +func AppendComplex(dst []byte, c complex128, fmt byte, prec, bitSize int) []byte { if bitSize != 64 && bitSize != 128 { panic("invalid bitSize") } bitSize >>= 1 // complex64 uses float32 internally + dst = append(dst, '(') + dst = AppendFloat(dst, real(c), fmt, prec, bitSize) + i := len(dst) + dst = AppendFloat(dst, imag(c), fmt, prec, bitSize) // Check if imaginary part has a sign. If not, add one. - im := FormatFloat(imag(c), fmt, prec, bitSize) - if im[0] != '+' && im[0] != '-' { - im = "+" + im + if dst[i] != '+' && dst[i] != '-' { + dst = append(dst, 0) + copy(dst[i+1:], dst[i:]) + dst[i] = '+' } - - return "(" + FormatFloat(real(c), fmt, prec, bitSize) + im + "i)" + dst = append(dst, "i)"...) + return dst } diff --git a/src/strconv/ctoa_test.go b/src/internal/strconv/ctoa_test.go similarity index 98% rename from src/strconv/ctoa_test.go rename to src/internal/strconv/ctoa_test.go index 8b77898eccc..c24f30272a2 100644 --- a/src/strconv/ctoa_test.go +++ b/src/internal/strconv/ctoa_test.go @@ -5,7 +5,7 @@ package strconv_test import ( - . "strconv" + . 
"internal/strconv" "testing" ) diff --git a/src/strconv/decimal.go b/src/internal/strconv/decimal.go similarity index 100% rename from src/strconv/decimal.go rename to src/internal/strconv/decimal.go diff --git a/src/strconv/decimal_test.go b/src/internal/strconv/decimal_test.go similarity index 99% rename from src/strconv/decimal_test.go rename to src/internal/strconv/decimal_test.go index 13a127f5b2c..a60096e1c8e 100644 --- a/src/strconv/decimal_test.go +++ b/src/internal/strconv/decimal_test.go @@ -5,7 +5,7 @@ package strconv_test import ( - . "strconv" + . "internal/strconv" "testing" ) diff --git a/src/internal/strconv/deps.go b/src/internal/strconv/deps.go new file mode 100644 index 00000000000..04331130c21 --- /dev/null +++ b/src/internal/strconv/deps.go @@ -0,0 +1,30 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package strconv + +import "unsafe" + +// Implementations to avoid importing other dependencies. + +// package math + +func float64frombits(b uint64) float64 { return *(*float64)(unsafe.Pointer(&b)) } +func float32frombits(b uint32) float32 { return *(*float32)(unsafe.Pointer(&b)) } +func float64bits(f float64) uint64 { return *(*uint64)(unsafe.Pointer(&f)) } +func float32bits(f float32) uint32 { return *(*uint32)(unsafe.Pointer(&f)) } + +func inf(sign int) float64 { + var v uint64 + if sign >= 0 { + v = 0x7FF0000000000000 + } else { + v = 0xFFF0000000000000 + } + return float64frombits(v) +} + +func isNaN(f float64) (is bool) { return f != f } + +func nan() float64 { return float64frombits(0x7FF8000000000001) } diff --git a/src/internal/strconv/export_test.go b/src/internal/strconv/export_test.go new file mode 100644 index 00000000000..c879f24480a --- /dev/null +++ b/src/internal/strconv/export_test.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package strconv + +type Uint128 = uint128 + +const ( + Pow10Min = pow10Min + Pow10Max = pow10Max +) + +var ( + MulLog10_2 = mulLog10_2 + MulLog2_10 = mulLog2_10 + ParseFloatPrefix = parseFloatPrefix + Pow10 = pow10 + Umul128 = umul128 + Umul192 = umul192 + Div5Tab = div5Tab + DivisiblePow5 = divisiblePow5 + TrimZeros = trimZeros +) + +func NewDecimal(i uint64) *decimal { + d := new(decimal) + d.Assign(i) + return d +} + +func SetOptimize(b bool) bool { + old := optimize + optimize = b + return old +} diff --git a/src/strconv/fp_test.go b/src/internal/strconv/fp_test.go similarity index 81% rename from src/strconv/fp_test.go rename to src/internal/strconv/fp_test.go index fd73958c97d..ba739941cc8 100644 --- a/src/strconv/fp_test.go +++ b/src/internal/strconv/fp_test.go @@ -6,9 +6,9 @@ package strconv_test import ( "bufio" + _ "embed" "fmt" - "os" - "strconv" + "internal/strconv" "strings" "testing" ) @@ -92,23 +92,21 @@ func myatof32(s string) (f float32, ok bool) { return f1, true } +//go:embed testdata/testfp.txt +var testfp string + func TestFp(t *testing.T) { - f, err := os.Open("testdata/testfp.txt") - if err != nil { - t.Fatal("testfp: open testdata/testfp.txt:", err) - } - defer f.Close() - - s := bufio.NewScanner(f) - + s := bufio.NewScanner(strings.NewReader(testfp)) for lineno := 1; s.Scan(); lineno++ { line := s.Text() - if len(line) == 0 || line[0] == '#' { + line, _, _ = strings.Cut(line, "#") + line = strings.TrimSpace(line) + if line == "" { continue } a := strings.Split(line, " ") if len(a) != 4 { - t.Error("testdata/testfp.txt:", lineno, ": wrong field count") + t.Errorf("testdata/testfp.txt:%d: wrong field count", lineno) continue } var s string @@ -118,22 +116,21 @@ func TestFp(t *testing.T) { var ok bool v, ok = myatof64(a[2]) if !ok { - t.Error("testdata/testfp.txt:", lineno, ": cannot atof64 ", a[2]) + t.Errorf("testdata/testfp.txt:%d: cannot atof64 %s", lineno, a[2]) continue } s = fmt.Sprintf(a[1], v) case "float32": v1, ok := myatof32(a[2]) if !ok { - t.Error("testdata/testfp.txt:", lineno, ": cannot atof32 ", a[2]) + t.Errorf("testdata/testfp.txt:%d: cannot atof32 %s", lineno, a[2]) continue } s = fmt.Sprintf(a[1], v1) v = float64(v1) } if s != a[3] { - t.Error("testdata/testfp.txt:", lineno, ": ", a[0], " ", a[1], " ", a[2], " (", v, ") ", - "want ", a[3], " got ", s) + t.Errorf("testdata/testfp.txt:%d: %s %s %s %s: have %s want %s", lineno, a[0], a[1], a[2], a[3], s, a[3]) } } if s.Err() != nil { diff --git a/src/strconv/ftoa.go b/src/internal/strconv/ftoa.go similarity index 90% rename from src/strconv/ftoa.go rename to src/internal/strconv/ftoa.go index 629df382402..64be29e23ef 100644 --- a/src/strconv/ftoa.go +++ b/src/internal/strconv/ftoa.go @@ -10,24 +10,30 @@ package strconv -import ( - "internal/ftoa" - "math" +const ( + lowerhex = "0123456789abcdef" + upperhex = "0123456789ABCDEF" ) -// TODO: move elsewhere? type floatInfo struct { mantbits uint expbits uint bias int } -var float32info = floatInfo{23, 8, -127} -var float64info = floatInfo{52, 11, -1023} +const ( + float32MantBits = 23 + float32ExpBits = 8 + float32Bias = -127 + float64MantBits = 52 + float64ExpBits = 11 + float64Bias = -1023 +) -func init() { - ftoa.SetFormatFloat(FormatFloat) -} +var ( + float32info = floatInfo{float32MantBits, float32ExpBits, float32Bias} + float64info = floatInfo{float64MantBits, float64ExpBits, float64Bias} +) // FormatFloat converts the floating-point number f to a string, // according to the format fmt and precision prec. 
It rounds the @@ -68,10 +74,10 @@ func genericFtoa(dst []byte, val float64, fmt byte, prec, bitSize int) []byte { var flt *floatInfo switch bitSize { case 32: - bits = uint64(math.Float32bits(float32(val))) + bits = uint64(float32bits(float32(val))) flt = &float32info case 64: - bits = math.Float64bits(val) + bits = float64bits(val) flt = &float64info default: panic("strconv: illegal AppendFloat/FormatFloat bitSize") @@ -117,16 +123,17 @@ func genericFtoa(dst []byte, val float64, fmt byte, prec, bitSize int) []byte { return bigFtoa(dst, prec, fmt, neg, mant, exp, flt) } - var digs decimalSlice - ok := false // Negative precision means "only as much as needed to be exact." shortest := prec < 0 + var digs decimalSlice + if mant == 0 { + return formatDigits(dst, shortest, neg, digs, prec, fmt) + } if shortest { // Use Ryu algorithm. var buf [32]byte digs.d = buf[:] ryuFtoaShortest(&digs, mant, exp-int(flt.mantbits), flt) - ok = true // Precision for shortest representation mode. switch fmt { case 'e', 'E': @@ -136,36 +143,44 @@ func genericFtoa(dst []byte, val float64, fmt byte, prec, bitSize int) []byte { case 'g', 'G': prec = digs.nd } - } else if fmt != 'f' { - // Fixed number of digits. - digits := prec - switch fmt { - case 'e', 'E': - digits++ - case 'g', 'G': - if prec == 0 { - prec = 1 - } - digits = prec - default: - // Invalid mode. - digits = 1 - } - var buf [24]byte - if bitSize == 32 && digits <= 9 { - digs.d = buf[:] - ryuFtoaFixed32(&digs, uint32(mant), exp-int(flt.mantbits), digits) - ok = true - } else if digits <= 18 { - digs.d = buf[:] - ryuFtoaFixed64(&digs, mant, exp-int(flt.mantbits), digits) - ok = true - } + return formatDigits(dst, shortest, neg, digs, prec, fmt) } - if !ok { - return bigFtoa(dst, prec, fmt, neg, mant, exp, flt) + + // Fixed number of digits. + digits := prec + switch fmt { + case 'f': + // %f precision specifies digits after the decimal point. + // Estimate an upper bound on the total number of digits needed. + // ftoaFixed will shorten as needed according to prec. + if exp >= 0 { + digits = 1 + mulLog10_2(1+exp) + prec + } else { + digits = 1 + prec - mulLog10_2(-exp) + } + case 'e', 'E': + digits++ + case 'g', 'G': + if prec == 0 { + prec = 1 + } + digits = prec + default: + // Invalid mode. + digits = 1 } - return formatDigits(dst, shortest, neg, digs, prec, fmt) + if digits <= 18 { + // digits <= 0 happens for %f on very small numbers + // and means that we're guaranteed to print all zeros. + if digits > 0 { + var buf [24]byte + digs.d = buf[:] + fixedFtoa(&digs, mant, exp-int(flt.mantbits), digits, prec, fmt) + } + return formatDigits(dst, false, neg, digs, prec, fmt) + } + + return bigFtoa(dst, prec, fmt, neg, mant, exp, flt) } // bigFtoa uses multiprecision computations to format a float. 
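Aside: for %f, genericFtoa above estimates the total significant digits from the binary exponent before calling fixedFtoa. A small worked example (illustrative; mulLog10_2 is re-declared locally so the snippet runs on its own):

package main

import (
	"fmt"
	"math"
)

// mulLog10_2 mirrors the internal helper: floor(x*log10(2)) in integer math.
func mulLog10_2(x int) int { return (x * 78913) >> 18 }

func main() {
	f := 123.45
	prec := 2
	_, e := math.Frexp(f) // f < 2^e
	exp := e - 1          // genericFtoa's exp satisfies 2^exp <= f < 2^(exp+1)
	digits := 1 + mulLog10_2(1+exp) + prec
	fmt.Println(digits) // 5: "12345" is exactly the digit count of 123.45 at 2 decimals
}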
@@ -480,7 +495,7 @@ func fmtB(dst []byte, neg bool, mant uint64, exp int, flt *floatInfo) []byte {
 	}
 
 	// mantissa
-	dst, _ = formatBits(dst, mant, 10, false, true)
+	dst = AppendUint(dst, mant, 10)
 
 	// p
 	dst = append(dst, 'p')
@@ -490,7 +505,7 @@ func fmtB(dst []byte, neg bool, mant uint64, exp int, flt *floatInfo) []byte {
 	if exp >= 0 {
 		dst = append(dst, '+')
 	}
-	dst, _ = formatBits(dst, uint64(exp), 10, exp < 0, true)
+	dst = AppendInt(dst, int64(exp), 10)
 
 	return dst
 }
diff --git a/src/strconv/ftoa_test.go b/src/internal/strconv/ftoa_test.go
similarity index 82%
rename from src/strconv/ftoa_test.go
rename to src/internal/strconv/ftoa_test.go
index 3512ccf5807..0393c3e17c3 100644
--- a/src/strconv/ftoa_test.go
+++ b/src/internal/strconv/ftoa_test.go
@@ -5,9 +5,9 @@ package strconv_test
 import (
+	. "internal/strconv"
 	"math"
 	"math/rand"
-	. "strconv"
 	"testing"
 )
 
@@ -42,6 +42,29 @@ var ftoatests = []ftoaTest{
 	{2000000, 'g', -1, "2e+06"},
 	{1e10, 'g', -1, "1e+10"},
 
+	// f conversion basic cases
+	{12345, 'f', 2, "12345.00"},
+	{1234.5, 'f', 2, "1234.50"},
+	{123.45, 'f', 2, "123.45"},
+	{12.345, 'f', 2, "12.35"},
+	{1.2345, 'f', 2, "1.23"},
+	{0.12345, 'f', 2, "0.12"},
+	{0.12945, 'f', 2, "0.13"},
+	{0.012345, 'f', 2, "0.01"},
+	{0.015, 'f', 2, "0.01"},
+	{0.016, 'f', 2, "0.02"},
+	{0.0052345, 'f', 2, "0.01"},
+	{0.0012345, 'f', 2, "0.00"},
+	{0.00012345, 'f', 2, "0.00"},
+	{0.000012345, 'f', 2, "0.00"},
+
+	{0.996644984, 'f', 6, "0.996645"},
+	{0.996644984, 'f', 5, "0.99664"},
+	{0.996644984, 'f', 4, "0.9966"},
+	{0.996644984, 'f', 3, "0.997"},
+	{0.996644984, 'f', 2, "1.00"},
+	{0.996644984, 'f', 1, "1.0"},
+
 	// g conversion and zero suppression
 	{400, 'g', 2, "4e+02"},
 	{40, 'g', 2, "40"},
@@ -172,6 +195,21 @@ var ftoatests = []ftoaTest{
 	{3.999969482421875, 'x', 2, "0x1.00p+02"},
 	{3.999969482421875, 'x', 1, "0x1.0p+02"},
 	{3.999969482421875, 'x', 0, "0x1p+02"},
+
+	// Cases that Java once mishandled, from David Chase.
+	{1.801439850948199e+16, 'g', -1, "1.801439850948199e+16"},
+	{5.960464477539063e-08, 'g', -1, "5.960464477539063e-08"},
+	{1.012e-320, 'g', -1, "1.012e-320"},
+
+	// Cases from TestFtoaRandom that caught bugs in fixedFtoa.
+	{8177880169308380. * (1 << 1), 'e', 14, "1.63557603386168e+16"},
+	{8393378656576888. * (1 << 1), 'e', 15, "1.678675731315378e+16"},
+	{8738676561280626. * (1 << 4), 'e', 16, "1.3981882498049002e+17"},
+	{8291032395191335. / (1 << 30), 'e', 5, "7.72163e+06"},
+
+	// Exercise divisiblePow5 case in fixedFtoa
+	{2384185791015625. * (1 << 12), 'e', 5, "9.76562e+18"},
+	{2384185791015625. * (1 << 13), 'e', 5, "1.95312e+19"},
 }
 
 func TestFtoa(t *testing.T) {
@@ -186,13 +224,20 @@ func TestFtoa(t *testing.T) {
 			t.Error("AppendFloat testN=64", test.f, string(test.fmt), test.prec, "want", "abc"+test.s, "got", string(x))
 		}
 		if float64(float32(test.f)) == test.f && test.fmt != 'b' {
+			test_s := test.s
+			if test.f == 5.960464477539063e-08 {
+				// This test is an exact float32 but asking for float64 precision in the string.
+				// (All our other float64-only tests fail the exactness check above.)
+				test_s = "5.9604645e-08"
+			}
 			s := FormatFloat(test.f, test.fmt, test.prec, 32)
-			if s != test.s {
-				t.Error("testN=32", test.f, string(test.fmt), test.prec, "want", test.s, "got", s)
+			if s != test_s {
+				t.Error("testN=32", test.f, string(test.fmt), test.prec, "want", test_s, "got", s)
 			}
 			x := AppendFloat([]byte("abc"), test.f, test.fmt, test.prec, 32)
-			if string(x) != "abc"+test.s {
-				t.Error("AppendFloat testN=32", test.f, string(test.fmt), test.prec, "want", "abc"+test.s, "got", string(x))
+			if string(x) != "abc"+test_s {
+				t.Error("AppendFloat testN=32", test.f, string(test.fmt), test.prec, "want", "abc"+test_s, "got", string(x))
 			}
 		}
 	}
@@ -241,7 +286,7 @@ func TestFtoaRandom(t *testing.T) {
 			shortSlow = FormatFloat(x, 'e', prec, 64)
 			SetOptimize(true)
 			if shortSlow != shortFast {
-				t.Errorf("%b printed as %s, want %s", x, shortFast, shortSlow)
+				t.Errorf("%b printed with %%.%de as %s, want %s", x, prec, shortFast, shortSlow)
 			}
 		}
 	}
@@ -282,8 +327,10 @@ var ftoaBenches = []struct {
 
 	{"64Fixed1", 123456, 'e', 3, 64},
 	{"64Fixed2", 123.456, 'e', 3, 64},
+	{"64Fixed2.5", 1.2345e+06, 'e', 3, 64},
 	{"64Fixed3", 1.23456e+78, 'e', 3, 64},
 	{"64Fixed4", 1.23456e-78, 'e', 3, 64},
+	{"64Fixed5Hard", 4.096e+25, 'e', 5, 64}, // needs divisiblePow5(..., 20)
 	{"64Fixed12", 1.23456e-78, 'e', 12, 64},
 	{"64Fixed16", 1.23456e-78, 'e', 16, 64},
 	// From testdata/testfp.txt
 	{"64Fixed17Hard", math.Ldexp(8887055249355788, 665), 'e', 17, 64},
 	{"64Fixed18Hard", math.Ldexp(6994187472632449, 690), 'e', 18, 64},
+
+	{"64FixedF1", 123.456, 'f', 6, 64},
+	{"64FixedF2", 0.0123, 'f', 6, 64},
+	{"64FixedF3", 12.3456, 'f', 2, 64},
+
 	// Trigger slow path (see issue #15672).
 	// The shortest is: 8.034137530808823e+43
 	{"Slowpath64", 8.03413753080882349e+43, 'e', -1, 64},
diff --git a/src/internal/strconv/ftoafixed.go b/src/internal/strconv/ftoafixed.go
new file mode 100644
index 00000000000..7f297e924e1
--- /dev/null
+++ b/src/internal/strconv/ftoafixed.go
@@ -0,0 +1,184 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package strconv
+
+import "math/bits"
+
+var uint64pow10 = [...]uint64{
+	1, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+	1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+}
+
+// fixedFtoa formats a number of decimal digits of mant*(2^exp) into d,
+// where mant > 0 and 1 ≤ digits ≤ 18.
+// If fmt == 'f', digits is a conservative overestimate, and the final
+// number of digits is prec past the decimal point.
+func fixedFtoa(d *decimalSlice, mant uint64, exp, digits, prec int, fmt byte) {
+	// The strategy here is to multiply (mant * 2^exp) by a power of 10
+	// to make the resulting integer be the number of digits we want.
+	//
+	// Adams proved in the Ryu paper that 128-bit precision in the
+	// power-of-10 constant is sufficient to produce correctly
+	// rounded output for all float64s, up to 18 digits.
+	// https://dl.acm.org/doi/10.1145/3192366.3192369
+	//
+	// TODO(rsc): The paper is not focused on, nor terribly clear about,
+	// this fact in this context, and the proof seems too complicated.
+	// Post a shorter, more direct proof and link to it here.
+
+	if digits > 18 {
+		panic("fixedFtoa called with digits > 18")
+	}
+
+	// Shift mantissa to have 64 bits,
+	// so that the 192-bit product below will
+	// have at least 63 bits in its top word.
+	b := 64 - bits.Len64(mant)
+	mant <<= b
+	exp -= b
+
+	// We have f = mant * 2^exp ≥ 2^(63+exp)
+	// and we want to multiply it by some 10^p
+	// to make it have the number of digits plus one rounding bit:
+	//
+	//	2 * 10^(digits-1) ≤ f * 10^p < ~2 * 10^digits
+	//
+	// The lower bound is required, but the upper bound is approximate:
+	// we must not have too few digits, but we can round away extra ones.
+	//
+	//	f * 10^p ≥ 2 * 10^(digits-1)
+	//	10^p ≥ 2 * 10^(digits-1) / f                        [dividing by f]
+	//	p ≥ (log₁₀ 2) + (digits-1) - log₁₀ f                [taking log₁₀]
+	//	p ≥ (log₁₀ 2) + (digits-1) - log₁₀ (mant * 2^exp)   [expanding f]
+	//	p ≥ (log₁₀ 2) + (digits-1) - (log₁₀ 2) * (64 + exp) [mant < 2⁶⁴]
+	//	p ≥ (digits - 1) - (log₁₀ 2) * (63 + exp)           [refactoring]
+	//
+	// Once we have p, we can compute the scaled value:
+	//
+	//	dm * 2^de = mant * 2^exp * 10^p
+	//	          = mant * 2^exp * pow/2^128 * 2^exp2
+	//	          = (mant * pow/2^128) * 2^(exp+exp2).
+	p := (digits - 1) - mulLog10_2(63+exp)
+	pow, exp2, ok := pow10(p)
+	if !ok {
+		// This never happens due to the range of float32/float64 exponent
+		panic("fixedFtoa: pow10 out of range")
+	}
+	if -22 <= p && p < 0 {
+		// Special case: Let q=-p. q is in [1,22]. We are dividing by 10^q
+		// and the mantissa may be a multiple of 5^q (5^22 < 2^53),
+		// in which case the division must be computed exactly and
+		// recorded as exact for correct rounding. Our normal computation is:
+		//
+		//	dm = floor(mant * floor(10^p * 2^s))
+		//
+		// for some scaling shift s. To make this an exact division,
+		// it suffices to change the inner floor to a ceil:
+		//
+		//	dm = floor(mant * ceil(10^p * 2^s))
+		//
+		// In the range of values we are using, the floor and ceil
+		// cancel each other out and the high 64 bits of the product
+		// come out exactly right.
+		// (This is the same trick compilers use for division by constants.
+		// See Hacker's Delight, 2nd ed., Chapter 10.)
+		pow.Lo++
+	}
+	dm, lo1, lo0 := umul192(mant, pow)
+	de := exp + exp2
+
+	// Check whether any bits have been truncated from dm.
+	// If so, set dt != 0. If not, leave dt == 0 (meaning dm is exact).
+	var dt uint
+	switch {
+	default:
+		// Most powers of 10 use a truncated constant,
+		// meaning the result is also truncated.
+		dt = 1
+	case 0 <= p && p <= 55:
+		// Small positive powers of 10 (up to 10⁵⁵) can be represented
+		// precisely in a 128-bit mantissa (5⁵⁵ ≤ 2¹²⁸), so the only truncation
+		// comes from discarding the low bits of the 192-bit product.
+		//
+		// TODO(rsc): The new proof mentioned above should also
+		// prove that we can't have lo1 == 0 and lo0 != 0.
+		// After proving that, drop computation and use of lo0 here.
+		dt = bool2uint(lo1|lo0 != 0)
+	case -22 <= p && p < 0 && divisiblePow5(mant, -p):
+		// If the original mantissa was a multiple of 5^-p,
+		// the result is exact. (See comment above for pow.Lo++.)
+		dt = 0
+	}
+
+	// The value we want to format is dm * 2^de, where de < 0.
+	// Multiply by 2^de by shifting, but leave one extra bit for rounding.
+	// After the shift, the "integer part" of dm is dm>>1,
+	// the "rounding bit" (the first fractional bit) is dm&1,
+	// and the "truncated bit" (have any bits been discarded?) is dt.
+	shift := -de - 1
+	dt |= bool2uint(dm&(1<<shift-1) != 0)
+	dm >>= shift
+
+	// Set decimal point in eventual formatted digits,
+	// so we can update it as we adjust the digits.
+	d.dp = digits - p
+
+	// Trim excess digit if any, updating truncation and decimal point.
+	// The << 1 is leaving room for the rounding bit.
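Aside: a quick standalone check of the bound just derived for p (illustrative only; mulLog10_2 is re-declared locally so the snippet runs on its own):

package main

import (
	"fmt"
	"math"
)

func mulLog10_2(x int) int { return (x * 78913) >> 18 }

func main() {
	// Take f = mant * 2^exp with a 64-bit-normalized mantissa, as fixedFtoa does.
	mant := uint64(1) << 63
	exp := -30 // f = 2^33 ≈ 8.59e9
	digits := 5

	p := (digits - 1) - mulLog10_2(63+exp) // p = -5 here
	scaled := float64(mant) * math.Ldexp(1, exp) * math.Pow(10, float64(p))
	fmt.Printf("p=%d scaled=%.2f\n", p, scaled) // ≈85899.35: five integer digits
	// Bound check: 2*10^(digits-1) <= scaled < ~2*10^digits.
	fmt.Println(scaled >= 2e4 && scaled < 2e5) // true
}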
+	max := uint64pow10[digits] << 1
+	if dm >= max {
+		var r uint
+		dm, r = dm/10, uint(dm%10)
+		dt |= bool2uint(r != 0)
+		d.dp++
+	}
+
+	// If this is %.*f we may have overestimated the digits needed.
+	// Now that we know where the decimal point is,
+	// trim to the actual number of digits, which is d.dp+prec.
+	if fmt == 'f' && digits != d.dp+prec {
+		for digits > d.dp+prec {
+			var r uint
+			dm, r = dm/10, uint(dm%10)
+			dt |= bool2uint(r != 0)
+			digits--
+		}
+
+		// Dropping those digits can create a new leftmost
+		// non-zero digit, like if we are formatting %.1f and
+		// convert 0.09 -> 0.1. Detect and adjust for that.
+		if digits <= 0 {
+			digits = 1
+			d.dp++
+		}
+
+		max = uint64pow10[digits] << 1
+	}
+
+	// Round and shift away rounding bit.
+	// We want to round up when
+	// (a) the fractional part is > 0.5 (dm&1 != 0 and dt == 1)
+	// (b) or the fractional part is ≥ 0.5 and the integer part is odd
+	//     (dm&1 != 0 and dm&2 != 0).
+	// The bitwise expression encodes that logic.
+	dm += uint64(uint(dm) & (dt | uint(dm)>>1) & 1)
+	dm >>= 1
+	if dm == max>>1 {
+		// 999... rolled over to 1000...
+		dm = uint64pow10[digits-1]
+		d.dp++
+	}
+
+	// Format digits into d.
+	if dm != 0 {
+		if formatBase10(d.d[:digits], dm) != 0 {
+			panic("formatBase10")
+		}
+		d.nd = digits
+		for d.d[d.nd-1] == '0' {
+			d.nd--
+		}
+	}
+}
diff --git a/src/internal/strconv/ftoaryu.go b/src/internal/strconv/ftoaryu.go
new file mode 100644
index 00000000000..9407bfec445
--- /dev/null
+++ b/src/internal/strconv/ftoaryu.go
@@ -0,0 +1,307 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package strconv
+
+import "math/bits"
+
+// binary to decimal conversion using the Ryū algorithm.
+//
+// See Ulf Adams, "Ryū: Fast Float-to-String Conversion" (doi:10.1145/3192366.3192369)
+
+// ryuFtoaShortest formats mant*2^exp into d using the shortest
+// decimal representation that still parses back to the same value.
+func ryuFtoaShortest(d *decimalSlice, mant uint64, exp int, flt *floatInfo) {
+	if mant == 0 {
+		d.nd, d.dp = 0, 0
+		return
+	}
+	// If input is an exact integer with fewer bits than the mantissa,
+	// the previous and next integer are not admissible representations.
+	if exp <= 0 && bits.TrailingZeros64(mant) >= -exp {
+		mant >>= uint(-exp)
+		ryuDigits(d, mant, mant, mant, true, false)
+		return
+	}
+	ml, mc, mu, e2 := computeBounds(mant, exp, flt)
+	if e2 == 0 {
+		ryuDigits(d, ml, mc, mu, true, false)
+		return
+	}
+	// Find 10^q *larger* than 2^-e2
+	q := mulLog10_2(-e2) + 1
+
+	// We are going to multiply by 10^q using 128-bit arithmetic.
+	// The exponent is the same for all 3 numbers.
+	var dl, dc, du uint64
+	var dl0, dc0, du0 bool
+	if flt == &float32info {
+		var dl32, dc32, du32 uint32
+		dl32, _, dl0 = mult64bitPow10(uint32(ml), e2, q)
+		dc32, _, dc0 = mult64bitPow10(uint32(mc), e2, q)
+		du32, e2, du0 = mult64bitPow10(uint32(mu), e2, q)
+		dl, dc, du = uint64(dl32), uint64(dc32), uint64(du32)
+	} else {
+		dl, _, dl0 = mult128bitPow10(ml, e2, q)
+		dc, _, dc0 = mult128bitPow10(mc, e2, q)
+		du, e2, du0 = mult128bitPow10(mu, e2, q)
+	}
+	if e2 >= 0 {
+		panic("not enough significant bits after mult128bitPow10")
+	}
+	// Is it an exact computation?
+	if q > 55 {
+		// Large positive powers of ten are not exact
+		dl0, dc0, du0 = false, false, false
+	}
+	if q < 0 && q >= -24 {
+		// Division by a power of ten may be exact.
+		// (note that 5^25 is a 59-bit number so division by 5^25 is never exact).
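Aside: a brute-force check (illustrative, standalone) that fixedFtoa's one-line rounding expression really is round-half-to-even, where dm carries one fractional bit and dt records whether any further nonzero bits were truncated:

package main

import "fmt"

func roundTrick(dm, dt uint64) uint64 {
	dm += dm & (dt | dm>>1) & 1
	return dm >> 1
}

func roundReference(dm, dt uint64) uint64 {
	intPart, half := dm>>1, dm&1 != 0
	switch {
	case half && dt != 0: // fraction strictly greater than 1/2
		return intPart + 1
	case half && intPart%2 == 1: // exactly 1/2: round to even
		return intPart + 1
	default:
		return intPart
	}
}

func main() {
	ok := true
	for dm := uint64(0); dm < 1000; dm++ {
		for dt := uint64(0); dt <= 1; dt++ {
			if roundTrick(dm, dt) != roundReference(dm, dt) {
				ok = false
			}
		}
	}
	fmt.Println(ok) // true
}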
+		if divisiblePow5(ml, -q) {
+			dl0 = true
+		}
+		if divisiblePow5(mc, -q) {
+			dc0 = true
+		}
+		if divisiblePow5(mu, -q) {
+			du0 = true
+		}
+	}
+	// Express the results (dl, dc, du)*2^e2 as integers.
+	// Extra bits must be removed and rounding hints computed.
+	extra := uint(-e2)
+	extraMask := uint64(1<<extra - 1)
+	dl, fracl := dl>>extra, dl&extraMask
+	dc, fracc := dc>>extra, dc&extraMask
+	du, fracu := du>>extra, du&extraMask
+	// Is it allowed to use 'du' as a result?
+	// It is always allowed when it is truncated, but also
+	// if it is exact and the original binary mantissa is even
+	// When disallowed, we can subtract 1.
+	uok := !du0 || fracu > 0
+	if du0 && fracu == 0 {
+		uok = mant&1 == 0
+	}
+	if !uok {
+		du--
+	}
+	// Is 'dc' the correctly rounded base 10 mantissa?
+	// The correct rounding might be dc+1
+	cup := false // don't round up.
+	if dc0 {
+		// If we computed an exact product, the half integer
+		// should round to next (even) integer if 'dc' is odd.
+		cup = fracc > 1<<(extra-1) ||
+			(fracc == 1<<(extra-1) && dc&1 == 1)
+	} else {
+		// otherwise, the result is a lower truncation of the ideal
+		// result.
+		cup = fracc>>(extra-1) == 1
+	}
+	// Is 'dl' an allowed representation?
+	// Only if it is an exact value, and if the original binary mantissa
+	// was even.
+	lok := dl0 && fracl == 0 && (mant&1 == 0)
+	if !lok {
+		dl++
+	}
+	// We need to remember whether the trimmed digits of 'dc' are zero.
+	c0 := dc0 && fracc == 0
+	// render digits
+	ryuDigits(d, dl, dc, du, c0, cup)
+	d.dp -= q
+}
+
+// computeBounds returns a floating-point vector (l, c, u)×2^e2
+// where the mantissas are 55-bit (or 26-bit) integers, describing the interval
+// represented by the input float64 or float32.
+func computeBounds(mant uint64, exp int, flt *floatInfo) (lower, central, upper uint64, e2 int) {
+	if mant != 1<<flt.mantbits || exp == flt.bias+1-int(flt.mantbits) {
+		// regular case (or denormals)
+		lower, central, upper = 2*mant-1, 2*mant, 2*mant+1
+		e2 = exp - 1
+		return
+	} else {
+		// border of an exponent
+		lower, central, upper = 4*mant-1, 4*mant, 4*mant+2
+		e2 = exp - 2
+		return
+	}
+}
+
+// ryuDigits emits the decimal digits of (lower, central, upper),
+// which are less than 10^18, into d.
+func ryuDigits(d *decimalSlice, lower, central, upper uint64, c0, cup bool) {
+	lhi, llo := uint32(lower/1e9), uint32(lower%1e9)
+	chi, clo := uint32(central/1e9), uint32(central%1e9)
+	uhi, ulo := uint32(upper/1e9), uint32(upper%1e9)
+	if uhi == 0 {
+		// only low digits (for denormals)
+		ryuDigits32(d, llo, clo, ulo, c0, cup, 8)
+	} else if lhi < uhi {
+		// truncate 9 digits at once.
+		if llo != 0 {
+			lhi++
+		}
+		c0 = c0 && clo == 0
+		// update rounding
+		cup = (clo > 5e8) || (clo == 5e8 && cup)
+		ryuDigits32(d, lhi, chi, uhi, c0, cup, 8)
+		d.dp += 9
+	} else {
+		d.nd = 0
+		// emit high part
+		n := uint(9)
+		for v := chi; v > 0; {
+			v1, v2 := v/10, v%10
+			v = v1
+			n--
+			d.d[n] = byte(v2 + '0')
+		}
+		d.d = d.d[n:]
+		d.nd = int(9 - n)
+		// emit low part
+		ryuDigits32(d, llo, clo, ulo,
+			c0, cup, d.nd+8)
+	}
+	// trim trailing zeros
+	for d.nd > 0 && d.d[d.nd-1] == '0' {
+		d.nd--
+	}
+	// trim initial zeros
+	for d.nd > 0 && d.d[0] == '0' {
+		d.nd--
+		d.dp--
+		d.d = d.d[1:]
+	}
+}
+
+// ryuDigits32 emits decimal digits for a number less than 1e9.
+func ryuDigits32(d *decimalSlice, lower, central, upper uint32,
+	c0, cup bool, endindex int) {
+	if upper == 0 {
+		d.dp = endindex + 1
+		return
+	}
+	trimmed := 0
+	// Remember last trimmed digit to check for round-up.
+	// c0 will be used to remember zeroness of following digits.
+	cNextDigit := 0
+	for upper > 0 {
+		// Repeatedly compute:
+		// l = Ceil(lower / 10^k)
+		// c = Round(central / 10^k)
+		// u = Floor(upper / 10^k)
+		// and stop when c goes out of the (l, u) interval.
+		l := (lower + 9) / 10
+		c, cdigit := central/10, central%10
+		u := upper / 10
+		if l > u {
+			// don't trim the last digit as it is forbidden to go below l;
+			// otherwise, trim and exit now.
+			break
+		}
+		// Check that we didn't cross the lower boundary.
+		// The case where l < u but c == l-1 is essentially impossible,
+		// but may happen if:
+		//    lower   = ..11
+		//    central = ..19
+		//    upper   = ..31
+		// and means that 'central' is very close but less than
+		// an integer ending with many zeros, and usually
+		// the "round-up" logic hides the problem.
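Aside: the whole point of tracking the (lower, central, upper) interval above is the round-trip guarantee of shortest formatting. That property is observable through the public API (illustrative, standalone):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	for _, f := range []float64{0.3, 2.675, 5e-324} {
		s := strconv.FormatFloat(f, 'g', -1, 64) // shortest decimal that round-trips
		g, _ := strconv.ParseFloat(s, 64)
		fmt.Println(s, g == f) // always true
	}
}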
+		if l == c+1 && c < u {
+			c++
+			cdigit = 0
+			cup = false
+		}
+		trimmed++
+		// Remember trimmed digits of c
+		c0 = c0 && cNextDigit == 0
+		cNextDigit = int(cdigit)
+		lower, central, upper = l, c, u
+	}
+	// should we round up?
+	if trimmed > 0 {
+		cup = cNextDigit > 5 ||
+			(cNextDigit == 5 && !c0) ||
+			(cNextDigit == 5 && c0 && central&1 == 1)
+	}
+	if central < upper && cup {
+		central++
+	}
+	// We know where the number ends, fill directly
+	endindex -= trimmed
+	v := central
+	n := endindex
+	for n > d.nd {
+		v1, v2 := v/100, v%100
+		d.d[n] = smalls[2*v2+1]
+		d.d[n-1] = smalls[2*v2+0]
+		n -= 2
+		v = v1
+	}
+	if n == d.nd {
+		d.d[n] = byte(v + '0')
+	}
+	d.nd = endindex + 1
+	d.dp = d.nd + trimmed
+}
+
+// mult64bitPow10 takes a floating-point input with a 25-bit
+// mantissa and multiplies it with 10^q. The resulting mantissa
+// is m*P >> 57 where P is a 64-bit truncated power of 10.
+// It is typically 31 or 32 bits wide.
+// The returned boolean is true if all trimmed bits were zero.
+//
+// That is:
+//
+//	m*2^e2 * round(10^q) = resM * 2^resE + ε
+//	exact = ε == 0
+func mult64bitPow10(m uint32, e2, q int) (resM uint32, resE int, exact bool) {
+	if q == 0 {
+		// P == 1<<63
+		return m << 6, e2 - 6, true
+	}
+	pow, exp2, ok := pow10(q)
+	if !ok {
+		// This never happens due to the range of float32/float64 exponent
+		panic("mult64bitPow10: power of 10 is out of range")
+	}
+	if q < 0 {
+		// Inverse powers of ten must be rounded up.
+		pow.Hi++
+	}
+	hi, lo := bits.Mul64(uint64(m), pow.Hi)
+	e2 += exp2 - 64 + 57
+	return uint32(hi<<7 | lo>>57), e2, lo<<7 == 0
+}
+
+// mult128bitPow10 takes a floating-point input with a 55-bit
+// mantissa and multiplies it with 10^q. The resulting mantissa
+// is m*P >> 119 where P is a 128-bit truncated power of 10.
+// It is typically 63 or 64 bits wide.
+// The returned boolean is true if all trimmed bits were zero.
+//
+// That is:
+//
+//	m*2^e2 * round(10^q) = resM * 2^resE + ε
+//	exact = ε == 0
+func mult128bitPow10(m uint64, e2, q int) (resM uint64, resE int, exact bool) {
+	if q == 0 {
+		// P == 1<<127
+		return m << 8, e2 - 8, true
+	}
+	pow, exp2, ok := pow10(q)
+	if !ok {
+		// This never happens due to the range of float32/float64 exponent
+		panic("mult128bitPow10: power of 10 is out of range")
+	}
+	if q < 0 {
+		// Inverse powers of ten must be rounded up.
+		pow.Lo++
+	}
+	e2 += exp2 - 128 + 119
+
+	hi, mid, lo := umul192(m, pow)
+	return hi<<9 | mid>>55, e2, mid<<9 == 0 && lo == 0
+}
diff --git a/src/internal/strconv/import_test.go b/src/internal/strconv/import_test.go
new file mode 100644
index 00000000000..3dab2bf9e56
--- /dev/null
+++ b/src/internal/strconv/import_test.go
@@ -0,0 +1,26 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package strconv_test
+
+import .
"internal/strconv" + +type uint128 = Uint128 + +const ( + pow10Min = Pow10Min + pow10Max = Pow10Max +) + +var ( + mulLog10_2 = MulLog10_2 + mulLog2_10 = MulLog2_10 + parseFloatPrefix = ParseFloatPrefix + pow10 = Pow10 + umul128 = Umul128 + umul192 = Umul192 + div5Tab = Div5Tab + divisiblePow5 = DivisiblePow5 + trimZeros = TrimZeros +) diff --git a/src/strconv/itoa.go b/src/internal/strconv/itoa.go similarity index 54% rename from src/strconv/itoa.go rename to src/internal/strconv/itoa.go index 928b37ffa63..2375e034f59 100644 --- a/src/strconv/itoa.go +++ b/src/internal/strconv/itoa.go @@ -6,14 +6,17 @@ package strconv import "math/bits" -const fastSmalls = true // enable fast path for small integers - // FormatUint returns the string representation of i in the given base, // for 2 <= base <= 36. The result uses the lower-case letters 'a' to 'z' // for digit values >= 10. func FormatUint(i uint64, base int) string { - if fastSmalls && i < nSmalls && base == 10 { - return small(int(i)) + if base == 10 { + if i < nSmalls { + return small(int(i)) + } + var a [24]byte + j := formatBase10(a[:], i) + return string(a[j:]) } _, s := formatBits(nil, i, base, false, false) return s @@ -23,8 +26,21 @@ func FormatUint(i uint64, base int) string { // for 2 <= base <= 36. The result uses the lower-case letters 'a' to 'z' // for digit values >= 10. func FormatInt(i int64, base int) string { - if fastSmalls && 0 <= i && i < nSmalls && base == 10 { - return small(int(i)) + if base == 10 { + if 0 <= i && i < nSmalls { + return small(int(i)) + } + var a [24]byte + u := uint64(i) + if i < 0 { + u = -u + } + j := formatBase10(a[:], u) + if i < 0 { + j-- + a[j] = '-' + } + return string(a[j:]) } _, s := formatBits(nil, uint64(i), base, i < 0, false) return s @@ -38,46 +54,29 @@ func Itoa(i int) string { // AppendInt appends the string form of the integer i, // as generated by [FormatInt], to dst and returns the extended buffer. func AppendInt(dst []byte, i int64, base int) []byte { - if fastSmalls && 0 <= i && i < nSmalls && base == 10 { - return append(dst, small(int(i))...) + u := uint64(i) + if i < 0 { + dst = append(dst, '-') + u = -u } - dst, _ = formatBits(dst, uint64(i), base, i < 0, true) - return dst + return AppendUint(dst, u, base) } // AppendUint appends the string form of the unsigned integer i, // as generated by [FormatUint], to dst and returns the extended buffer. func AppendUint(dst []byte, i uint64, base int) []byte { - if fastSmalls && i < nSmalls && base == 10 { - return append(dst, small(int(i))...) + if base == 10 { + if i < nSmalls { + return append(dst, small(int(i))...) + } + var a [24]byte + j := formatBase10(a[:], i) + return append(dst, a[j:]...) } dst, _ = formatBits(dst, i, base, false, true) return dst } -// small returns the string for an i with 0 <= i < nSmalls. -func small(i int) string { - if i < 10 { - return digits[i : i+1] - } - return smallsString[i*2 : i*2+2] -} - -const nSmalls = 100 - -const smallsString = "00010203040506070809" + - "10111213141516171819" + - "20212223242526272829" + - "30313233343536373839" + - "40414243444546474849" + - "50515253545556575859" + - "60616263646566676869" + - "70717273747576777879" + - "80818283848586878889" + - "90919293949596979899" - -const host32bit = ^uint(0)>>32 == 0 - const digits = "0123456789abcdefghijklmnopqrstuvwxyz" // formatBits computes the string representation of u in the given base. 
@@ -85,15 +84,15 @@ const digits = "0123456789abcdefghijklmnopqrstuvwxyz"
 // set, the string is appended to dst and the resulting byte slice is
 // returned as the first result value; otherwise the string is returned
 // as the second result value.
+// The caller is expected to have handled base 10 separately for speed.
 func formatBits(dst []byte, u uint64, base int, neg, append_ bool) (d []byte, s string) {
-	if base < 2 || base > len(digits) {
+	if base < 2 || base == 10 || base > len(digits) {
 		panic("strconv: illegal AppendInt/FormatInt base")
 	}
 	// 2 <= base && base <= len(digits)
 
 	var a [64 + 1]byte // +1 for sign of 64bit value in base 2
 	i := len(a)
-
 	if neg {
 		u = -u
 	}
@@ -101,56 +100,7 @@ func formatBits(dst []byte, u uint64, base int, neg, append_ bool) (d []byte, s
 	// convert bits
 	// We use uint values where we can because those will
 	// fit into a single register even on a 32bit machine.
-	if base == 10 {
-		// common case: use constants for / because
-		// the compiler can optimize it into a multiply+shift
-
-		if host32bit {
-			// convert the lower digits using 32bit operations
-			for u >= 1e9 {
-				// Avoid using r = a%b in addition to q = a/b
-				// since 64bit division and modulo operations
-				// are calculated by runtime functions on 32bit machines.
-				q := u / 1e9
-				us := uint(u - q*1e9) // u % 1e9 fits into a uint
-				for j := 4; j > 0; j-- {
-					is := us % 100 * 2
-					us /= 100
-					i -= 2
-					a[i+1] = smallsString[is+1]
-					a[i+0] = smallsString[is+0]
-				}
-
-				// us < 10, since it contains the last digit
-				// from the initial 9-digit us.
-				i--
-				a[i] = smallsString[us*2+1]
-
-				u = q
-			}
-			// u < 1e9
-		}
-
-		// u guaranteed to fit into a uint
-		us := uint(u)
-		for us >= 100 {
-			is := us % 100 * 2
-			us /= 100
-			i -= 2
-			a[i+1] = smallsString[is+1]
-			a[i+0] = smallsString[is+0]
-		}
-
-		// us < 100
-		is := us * 2
-		i--
-		a[i] = smallsString[is+1]
-		if us >= 10 {
-			i--
-			a[i] = smallsString[is]
-		}
-
-	} else if isPowerOfTwo(base) {
+	if isPowerOfTwo(base) {
 		// Use shifts and masks instead of / and %.
 		shift := uint(bits.TrailingZeros(uint(base)))
 		b := uint64(base)
@@ -197,3 +147,91 @@ func isPowerOfTwo(x int) bool {
 	return x&(x-1) == 0
 }
+
+const nSmalls = 100
+
+// smalls is the formatting of 00..99 concatenated.
+// It is then padded out with 56 x's to 256 bytes,
+// so that smalls[x&0xFF] has no bounds check.
+const smalls = "00010203040506070809" +
+	"10111213141516171819" +
+	"20212223242526272829" +
+	"30313233343536373839" +
+	"40414243444546474849" +
+	"50515253545556575859" +
+	"60616263646566676869" +
+	"70717273747576777879" +
+	"80818283848586878889" +
+	"90919293949596979899" +
+	"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+
+const host64bit = ^uint(0)>>32 != 0
+
+// small returns the string for an i with 0 <= i < nSmalls.
+func small(i int) string {
+	if i < 10 {
+		return digits[i : i+1]
+	}
+	return smalls[i*2 : i*2+2]
+}
+
+// RuntimeFormatBase10 formats u into the tail of a
+// and returns the offset to the first byte written to a.
+// It is only for use by package runtime.
+// Other packages should use AppendUint.
+func RuntimeFormatBase10(a []byte, u uint64) int {
+	return formatBase10(a, u)
+}
+
+// formatBase10 formats the decimal representation of u into the tail of a
+// and returns the offset of the first byte written to a. That is, after
+//
+//	i := formatBase10(a, u)
+//
+// the decimal representation is in a[i:].
+func formatBase10(a []byte, u uint64) int { + // Split into 9-digit chunks that fit in uint32s + // and convert each chunk using uint32 math instead of uint64 math. + // The obvious way to write the outer loop is "for u >= 1e9", but most numbers are small, + // so the setup for the comparison u >= 1e9 is usually pure overhead. + // Instead, we approximate it by u>>29 != 0, which is usually faster and good enough. + i := len(a) + for (host64bit && u>>29 != 0) || (!host64bit && uint32(u)>>29|uint32(u>>32) != 0) { + var lo uint32 + u, lo = u/1e9, uint32(u%1e9) + + // Convert 9 digits. + for range 4 { + var dd uint32 + lo, dd = lo/100, (lo%100)*2 + i -= 2 + a[i+0], a[i+1] = smalls[dd+0], smalls[dd+1] + } + i-- + a[i] = smalls[lo*2+1] + + // If we'd been using u >= 1e9 then we would be guaranteed that u/1e9 > 0, + // but since we used u>>29 != 0, u/1e9 might be 0, so we might be done. + // (If u is now 0, then at the start we had 2²⁹ ≤ u < 10⁹, so it was still correct + // to write 9 digits; we have not accidentally written any leading zeros.) + if u == 0 { + return i + } + } + + // Convert final chunk, at most 8 digits. + lo := uint32(u) + for lo >= 100 { + var dd uint32 + lo, dd = lo/100, (lo%100)*2 + i -= 2 + a[i+0], a[i+1] = smalls[dd+0], smalls[dd+1] + } + i-- + dd := lo * 2 + a[i] = smalls[dd+1] + if lo >= 10 { + i-- + a[i] = smalls[dd+0] + } + return i +} diff --git a/src/strconv/itoa_test.go b/src/internal/strconv/itoa_test.go similarity index 98% rename from src/strconv/itoa_test.go rename to src/internal/strconv/itoa_test.go index b8bc52490a9..1629e45d48c 100644 --- a/src/strconv/itoa_test.go +++ b/src/internal/strconv/itoa_test.go @@ -5,7 +5,8 @@ package strconv_test import ( - . "strconv" + "fmt" + . "internal/strconv" "testing" ) @@ -230,7 +231,7 @@ func BenchmarkAppendIntSmall(b *testing.B) { func BenchmarkAppendUintVarlen(b *testing.B) { for _, test := range varlenUints { - b.Run(test.out, func(b *testing.B) { + b.Run(fmt.Sprint("digits=", len(test.out)), func(b *testing.B) { dst := make([]byte, 0, 30) for j := 0; j < b.N; j++ { dst = AppendUint(dst[:0], test.in, 10) diff --git a/src/internal/strconv/math.go b/src/internal/strconv/math.go new file mode 100644 index 00000000000..3b884e846a6 --- /dev/null +++ b/src/internal/strconv/math.go @@ -0,0 +1,179 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package strconv + +import "math/bits" + +// A uint128 is a 128-bit uint. +// The fields are exported to make them visible to package strconv_test. +type uint128 struct { + Hi uint64 + Lo uint64 +} + +// umul128 returns the 128-bit product x*y. +func umul128(x, y uint64) uint128 { + hi, lo := bits.Mul64(x, y) + return uint128{hi, lo} +} + +// umul192 returns the 192-bit product x*y in three uint64s. +func umul192(x uint64, y uint128) (hi, mid, lo uint64) { + mid1, lo := bits.Mul64(x, y.Lo) + hi, mid2 := bits.Mul64(x, y.Hi) + mid, carry := bits.Add64(mid1, mid2, 0) + return hi + carry, mid, lo +} + +// pow10 returns the 128-bit mantissa and binary exponent of 10**e. +// That is, 10^e = mant/2^128 * 2**exp. +// If e is out of range, pow10 returns ok=false. +func pow10(e int) (mant uint128, exp int, ok bool) { + if e < pow10Min || e > pow10Max { + return + } + return pow10Tab[e-pow10Min], 1 + mulLog2_10(e), true +} + +// mulLog10_2 returns math.Floor(x * log(2)/log(10)) for an integer x in +// the range -1600 <= x && x <= +1600. 
+//
+// The range restriction lets us work in faster integer arithmetic instead of
+// slower floating point arithmetic. Correctness is verified by unit tests.
+func mulLog10_2(x int) int {
+	// log(2)/log(10) ≈ 0.30102999566 ≈ 78913 / 2^18
+	return (x * 78913) >> 18
+}
+
+// mulLog2_10 returns math.Floor(x * log(10)/log(2)) for an integer x in
+// the range -500 <= x && x <= +500.
+//
+// The range restriction lets us work in faster integer arithmetic instead of
+// slower floating point arithmetic. Correctness is verified by unit tests.
+func mulLog2_10(x int) int {
+	// log(10)/log(2) ≈ 3.32192809489 ≈ 108853 / 2^15
+	return (x * 108853) >> 15
+}
+
+func bool2uint(b bool) uint {
+	if b {
+		return 1
+	}
+	return 0
+}
+
+// Exact Division and Remainder Checking
+//
+// An exact division x/c (exact means x%c == 0)
+// can be implemented by x*m where m is the multiplicative inverse of c (m*c == 1).
+//
+// Since c is also the multiplicative inverse of m, x*m is lossless,
+// and all the exact multiples of c map to all of [0, maxUint64/c].
+// The non-multiples are forced to map to larger values.
+// This also gives a quick test for whether x is an exact multiple of c:
+// compute the exact division and check whether it's at most maxUint64/c:
+// x%c == 0  =>  x*m <= maxUint64/c.
+//
+// Only odd c have multiplicative inverses mod powers of two.
+// To do an exact divide x / (c<<s), we can use the inverse of the
+// odd part c and then shift away the low bits: x*m>>s instead.
+// And to check for remainder, we need to check that those low s
+// bits are all zero before we shift them away. We can merge that
+// with the <= for the exact odd remainder check by rotating the
+// shifted bits into the high part instead:
+// x%(c<<s) == 0  =>  bits.RotateLeft64(x*m, -s) <= maxUint64/c.
+//
+// The compiler does this transformation automatically in general,
+// but we apply it here by hand in a few ways that the compiler can't help with.
+//
+// For a more detailed explanation, see
+// Henry S. Warren, Jr., Hacker's Delight, 2nd ed., sections 10-16 and 10-17.
+
+// divisiblePow5 reports whether x is divisible by 5^p.
+// It returns false for p not in [1, 22],
+// because we only care about float64 mantissas, and 5^23 > 2^53.
+func divisiblePow5(x uint64, p int) bool {
+	return 1 <= p && p <= 22 && x*div5Tab[p-1][0] <= div5Tab[p-1][1]
+}
+
+const maxUint64 = 1<<64 - 1
+
+// div5Tab[p-1] is the multiplicative inverse of 5^p and maxUint64/5^p.
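Aside, before the table itself: a compact standalone demonstration of the inverse-based divisibility test just described, for c = 5 (illustrative only):

package main

import "fmt"

func main() {
	var m uint64 = 0xcccccccccccccccd // multiplicative inverse of 5 mod 2^64
	le := ^uint64(0) / 5              // maxUint64/5

	fmt.Println(m*5 == 1) // true: m really is the inverse (the product wraps to 1)

	ok := true
	for x := uint64(0); x < 100000; x++ {
		// x is a multiple of 5 exactly when the "exact division" x*m
		// lands in [0, maxUint64/5].
		if (x%5 == 0) != (x*m <= le) {
			ok = false
		}
	}
	fmt.Println(ok) // true
}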
+var div5Tab = [22][2]uint64{ + {0xcccccccccccccccd, maxUint64 / 5}, + {0x8f5c28f5c28f5c29, maxUint64 / 5 / 5}, + {0x1cac083126e978d5, maxUint64 / 5 / 5 / 5}, + {0xd288ce703afb7e91, maxUint64 / 5 / 5 / 5 / 5}, + {0x5d4e8fb00bcbe61d, maxUint64 / 5 / 5 / 5 / 5 / 5}, + {0x790fb65668c26139, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5}, + {0xe5032477ae8d46a5, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0xc767074b22e90e21, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0x8e47ce423a2e9c6d, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0x4fa7f60d3ed61f49, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0x0fee64690c913975, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0x3662e0e1cf503eb1, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0xa47a2cf9f6433fbd, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0x54186f653140a659, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0x7738164770402145, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0xe4a4d1417cd9a041, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0xc75429d9e5c5200d, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0xc1773b91fac10669, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0x26b172506559ce15, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0xd489e3a9addec2d1, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0x90e860bb892c8d5d, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0x502e79bf1b6f4f79, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, +} + +// trimZeros trims trailing zeros from x. +// It finds the largest p such that x % 10^p == 0 +// and then returns x / 10^p, p. +// +// This is here for reference and tested, because it is an optimization +// used by other ftoa algorithms, but in our implementations it has +// never been benchmarked to be faster than trimming zeros after +// formatting into decimal bytes. +func trimZeros(x uint64) (uint64, int) { + const ( + div1e8m = 0xc767074b22e90e21 + div1e8le = maxUint64 / 100000000 + + div1e4m = 0xd288ce703afb7e91 + div1e4le = maxUint64 / 10000 + + div1e2m = 0x8f5c28f5c28f5c29 + div1e2le = maxUint64 / 100 + + div1e1m = 0xcccccccccccccccd + div1e1le = maxUint64 / 10 + ) + + // _ = assert[x - y] asserts at compile time that x == y. + // Assert that the multiplicative inverses are correct + // by checking that (div1eNm * 5^N) % 1<<64 == 1. + var assert [1]struct{} + _ = assert[(div1e8m*5*5*5*5*5*5*5*5)%(1<<64)-1] + _ = assert[(div1e4m*5*5*5*5)%(1<<64)-1] + _ = assert[(div1e2m*5*5)%(1<<64)-1] + _ = assert[(div1e1m*5)%(1<<64)-1] + + // Cut 8 zeros, then 4, then 2, then 1. + p := 0 + for d := bits.RotateLeft64(x*div1e8m, -8); d <= div1e8le; d = bits.RotateLeft64(x*div1e8m, -8) { + x = d + p += 8 + } + if d := bits.RotateLeft64(x*div1e4m, -4); d <= div1e4le { + x = d + p += 4 + } + if d := bits.RotateLeft64(x*div1e2m, -2); d <= div1e2le { + x = d + p += 2 + } + if d := bits.RotateLeft64(x*div1e1m, -1); d <= div1e1le { + x = d + p += 1 + } + return x, p +} diff --git a/src/internal/strconv/math_test.go b/src/internal/strconv/math_test.go new file mode 100644 index 00000000000..55e25f98cfe --- /dev/null +++ b/src/internal/strconv/math_test.go @@ -0,0 +1,165 @@ +// Copyright 2025 The Go Authors. 
All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package strconv_test
+
+import (
+	. "internal/strconv"
+	"math"
+	"testing"
+)
+
+var pow10Tests = []struct {
+	exp10 int
+	mant  uint128
+	exp2  int
+	ok    bool
+}{
+	{-349, uint128{0, 0}, 0, false},
+	{-348, uint128{0xFA8FD5A0081C0288, 0x1732C869CD60E453}, -1156, true},
+	{0, uint128{0x8000000000000000, 0x0000000000000000}, 1, true},
+	{347, uint128{0xD13EB46469447567, 0x4B7195F2D2D1A9FB}, 1153, true},
+	{348, uint128{0, 0}, 0, false},
+}
+
+func TestPow10(t *testing.T) {
+	for _, tt := range pow10Tests {
+		mant, exp2, ok := pow10(tt.exp10)
+		if mant != tt.mant || exp2 != tt.exp2 {
+			t.Errorf("pow10(%d) = %#016x, %#016x, %d, %v want %#016x, %#016x, %d, %v",
+				tt.exp10, mant.Hi, mant.Lo, exp2, ok,
+				tt.mant.Hi, tt.mant.Lo, tt.exp2, tt.ok)
+		}
+	}
+
+	for p := pow10Min; p <= pow10Max; p++ {
+		mant, exp2, ok := pow10(p)
+		if !ok {
+			t.Errorf("pow10(%d) not ok", p)
+			continue
+		}
+		// Note: -64 instead of -128 because we only used mant.Hi, not all of mant.
+		have := math.Ldexp(float64(mant.Hi), exp2-64)
+		want := math.Pow(10, float64(p))
+		if math.Abs(have-want)/want > 0.00001 {
+			t.Errorf("pow10(%d) = %#016x%016x/2^128 * 2^%d = %g want ~%g", p, mant.Hi, mant.Lo, exp2, have, want)
+		}
+	}
+
+}
+
+func u128(hi, lo uint64) uint128 {
+	return uint128{Hi: hi, Lo: lo}
+}
+
+var umul192Tests = []struct {
+	x   uint64
+	y   uint128
+	hi  uint64
+	mid uint64
+	lo  uint64
+}{
+	{0, u128(0, 0), 0, 0, 0},
+	{^uint64(0), u128(^uint64(0), ^uint64(0)), ^uint64(1), ^uint64(0), 1},
+}
+
+func TestUmul192(t *testing.T) {
+	for _, tt := range umul192Tests {
+		hi, mid, lo := Umul192(tt.x, tt.y)
+		if hi != tt.hi || mid != tt.mid || lo != tt.lo {
+			t.Errorf("umul192(%#x, {%#x,%#x}) = %#x, %#x, %#x, want %#x, %#x, %#x",
+				tt.x, tt.y.Hi, tt.y.Lo, hi, mid, lo, tt.hi, tt.mid, tt.lo)
+		}
+	}
+}
+
+func TestMulLog10_2(t *testing.T) {
+	for x := -1600; x <= +1600; x++ {
+		iMath := mulLog10_2(x)
+		fMath := int(math.Floor(float64(x) * math.Ln2 / math.Ln10))
+		if iMath != fMath {
+			t.Errorf("mulLog10_2(%d) failed: %d vs %d\n", x, iMath, fMath)
+		}
+	}
+}
+
+func TestMulLog2_10(t *testing.T) {
+	for x := -500; x <= +500; x++ {
+		iMath := mulLog2_10(x)
+		fMath := int(math.Floor(float64(x) * math.Ln10 / math.Ln2))
+		if iMath != fMath {
+			t.Errorf("mulLog2_10(%d) failed: %d vs %d\n", x, iMath, fMath)
+		}
+	}
+}
+
+func pow5(p int) uint64 {
+	x := uint64(1)
+	for range p {
+		x *= 5
+	}
+	return x
+}
+
+func TestDivisiblePow5(t *testing.T) {
+	for p := 1; p <= 22; p++ {
+		x := pow5(p)
+		if divisiblePow5(1, p) {
+			t.Errorf("divisiblePow5(1, %d) = true, want false", p)
+		}
+		if divisiblePow5(x-1, p) {
+			t.Errorf("divisiblePow5(%d, %d) = true, want false", x-1, p)
+		}
+		if divisiblePow5(x+1, p) {
+			t.Errorf("divisiblePow5(%d, %d) = true, want false", x+1, p)
+		}
+		if divisiblePow5(x/5, p) {
+			t.Errorf("divisiblePow5(%d, %d) = true, want false", x/5, p)
+		}
+		if !divisiblePow5(0, p) {
+			t.Errorf("divisiblePow5(0, %d) = false, want true", p)
+		}
+		if !divisiblePow5(x, p) {
+			t.Errorf("divisiblePow5(%d, %d) = false, want true", x, p)
+		}
+		if 2*x > x && !divisiblePow5(2*x, p) {
+			t.Errorf("divisiblePow5(%d, %d) = false, want true", 2*x, p)
+		}
+	}
+}
+
+func TestDiv5Tab(t *testing.T) {
+	for p := 1; p <= 22; p++ {
+		m := div5Tab[p-1][0]
+		le := div5Tab[p-1][1]
+
+		// See comment in math.go on div5Tab.
+		// m needs to be the multiplicative inverse of pow5(p).
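+		// The product below wraps mod 2^64, so m*pow5(p) must come out to exactly 1.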
+		if m*pow5(p) != 1 {
+			t.Errorf("div5Tab[%d-1][0] = %#x, but %#x * (5**%d) = %d, want 1", p, m, m, p, m*pow5(p))
+		}
+
+		// le needs to be ⌊(1<<64 - 1) / 5^p⌋.
+		want := (1<<64 - 1) / pow5(p)
+		if le != want {
+			t.Errorf("div5Tab[%d-1][1] = %#x, want %#x", p, le, want)
+		}
+	}
+}
+
+func TestTrimZeros(t *testing.T) {
+	for _, x := range []uint64{1, 2, 3, 4, 101, 123} {
+		want := x
+		for p := range 20 {
+			haveX, haveP := trimZeros(x)
+			if haveX != want || haveP != p {
+				t.Errorf("trimZeros(%d) = %d, %d, want %d, %d", x, haveX, haveP, want, p)
+			}
+			if x >= (1<<64-1)/10 {
+				break
+			}
+			x *= 10
+		}
+	}
+}
diff --git a/src/internal/strconv/pow10gen.go b/src/internal/strconv/pow10gen.go
new file mode 100644
index 00000000000..2d428fe088e
--- /dev/null
+++ b/src/internal/strconv/pow10gen.go
@@ -0,0 +1,91 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"go/format"
+	"log"
+	"math/big"
+	"os"
+)
+
+const (
+	minExp = -348
+	maxExp = 347
+)
+
+func main() {
+	log.SetPrefix("pow10gen: ")
+	log.SetFlags(0)
+
+	var (
+		one = big.NewInt(1)
+		ten = big.NewInt(10)
+
+		b1p64  = new(big.Int).Lsh(one, 64)
+		b1p128 = new(big.Int).Lsh(one, 128)
+
+		r2     = big.NewRat(2, 1)
+		r1p128 = new(big.Rat).SetInt(b1p128)
+	)
+
+	var out bytes.Buffer
+	fmt.Fprintf(&out, top, minExp, maxExp)
+	for e := int64(minExp); e <= maxExp; e++ {
+		var r *big.Rat
+		if e >= 0 {
+			r = new(big.Rat).SetInt(new(big.Int).Exp(ten, big.NewInt(e), nil))
+		} else {
+			r = new(big.Rat).SetFrac(one, new(big.Int).Exp(ten, big.NewInt(-e), nil))
+		}
+		be := 0
+		for r.Cmp(r1p128) < 0 {
+			r.Mul(r, r2)
+			be++
+		}
+		for r.Cmp(r1p128) >= 0 {
+			r.Quo(r, r2)
+			be--
+		}
+		d := new(big.Int).Div(r.Num(), r.Denom())
+		hi, lo := new(big.Int).DivMod(d, b1p64, new(big.Int))
+		fmt.Fprintf(&out, "\t{%#016x, %#016x}, // 1e%d * 2**%d\n", hi.Uint64(), lo.Uint64(), e, be)
+	}
+	fmt.Fprintf(&out, "}\n")
+
+	src, err := format.Source(out.Bytes())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	if err := os.WriteFile("pow10tab.go", src, 0666); err != nil {
+		log.Fatal(err)
+	}
+}
+
+var top = `// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by: go run pow10gen.go. DO NOT EDIT.
+//
+//go:generate go run pow10gen.go
+
+package strconv
+
+const (
+	pow10Min = %d
+	pow10Max = %d
+)
+
+
+// pow10Tab holds 128-bit mantissas of powers of 10.
+// The values are scaled so the high bit is always set; there is no "implicit leading 1 bit".
+var pow10Tab = [...]uint128{
+`
diff --git a/src/internal/strconv/pow10tab.go b/src/internal/strconv/pow10tab.go
new file mode 100644
index 00000000000..029ae02b660
--- /dev/null
+++ b/src/internal/strconv/pow10tab.go
@@ -0,0 +1,715 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by: go run pow10gen.go. DO NOT EDIT.
+//
+//go:generate go run pow10gen.go
+
+package strconv
+
+const (
+	pow10Min = -348
+	pow10Max = 347
+)
+
+// pow10Tab holds 128-bit mantissas of powers of 10.
+// The values are scaled so the high bit is always set; there is no "implicit leading 1 bit".
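+//
+// Each entry is the 128-bit integer ⌊10^e * 2**be⌋ for the e and be given in
+// its comment. For example, the 1e2 entry is {0xc800000000000000, 0x0000000000000000}
+// because 100 * 2**121 = 0x64 << 121 = 0xc8 << 120, which has its high bit set.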
+var pow10Tab = [...]uint128{ + {0xfa8fd5a0081c0288, 0x1732c869cd60e453}, // 1e-348 * 2**1284 + {0x9c99e58405118195, 0x0e7fbd42205c8eb4}, // 1e-347 * 2**1280 + {0xc3c05ee50655e1fa, 0x521fac92a873b261}, // 1e-346 * 2**1277 + {0xf4b0769e47eb5a78, 0xe6a797b752909ef9}, // 1e-345 * 2**1274 + {0x98ee4a22ecf3188b, 0x9028bed2939a635c}, // 1e-344 * 2**1270 + {0xbf29dcaba82fdeae, 0x7432ee873880fc33}, // 1e-343 * 2**1267 + {0xeef453d6923bd65a, 0x113faa2906a13b3f}, // 1e-342 * 2**1264 + {0x9558b4661b6565f8, 0x4ac7ca59a424c507}, // 1e-341 * 2**1260 + {0xbaaee17fa23ebf76, 0x5d79bcf00d2df649}, // 1e-340 * 2**1257 + {0xe95a99df8ace6f53, 0xf4d82c2c107973dc}, // 1e-339 * 2**1254 + {0x91d8a02bb6c10594, 0x79071b9b8a4be869}, // 1e-338 * 2**1250 + {0xb64ec836a47146f9, 0x9748e2826cdee284}, // 1e-337 * 2**1247 + {0xe3e27a444d8d98b7, 0xfd1b1b2308169b25}, // 1e-336 * 2**1244 + {0x8e6d8c6ab0787f72, 0xfe30f0f5e50e20f7}, // 1e-335 * 2**1240 + {0xb208ef855c969f4f, 0xbdbd2d335e51a935}, // 1e-334 * 2**1237 + {0xde8b2b66b3bc4723, 0xad2c788035e61382}, // 1e-333 * 2**1234 + {0x8b16fb203055ac76, 0x4c3bcb5021afcc31}, // 1e-332 * 2**1230 + {0xaddcb9e83c6b1793, 0xdf4abe242a1bbf3d}, // 1e-331 * 2**1227 + {0xd953e8624b85dd78, 0xd71d6dad34a2af0d}, // 1e-330 * 2**1224 + {0x87d4713d6f33aa6b, 0x8672648c40e5ad68}, // 1e-329 * 2**1220 + {0xa9c98d8ccb009506, 0x680efdaf511f18c2}, // 1e-328 * 2**1217 + {0xd43bf0effdc0ba48, 0x0212bd1b2566def2}, // 1e-327 * 2**1214 + {0x84a57695fe98746d, 0x014bb630f7604b57}, // 1e-326 * 2**1210 + {0xa5ced43b7e3e9188, 0x419ea3bd35385e2d}, // 1e-325 * 2**1207 + {0xcf42894a5dce35ea, 0x52064cac828675b9}, // 1e-324 * 2**1204 + {0x818995ce7aa0e1b2, 0x7343efebd1940993}, // 1e-323 * 2**1200 + {0xa1ebfb4219491a1f, 0x1014ebe6c5f90bf8}, // 1e-322 * 2**1197 + {0xca66fa129f9b60a6, 0xd41a26e077774ef6}, // 1e-321 * 2**1194 + {0xfd00b897478238d0, 0x8920b098955522b4}, // 1e-320 * 2**1191 + {0x9e20735e8cb16382, 0x55b46e5f5d5535b0}, // 1e-319 * 2**1187 + {0xc5a890362fddbc62, 0xeb2189f734aa831d}, // 1e-318 * 2**1184 + {0xf712b443bbd52b7b, 0xa5e9ec7501d523e4}, // 1e-317 * 2**1181 + {0x9a6bb0aa55653b2d, 0x47b233c92125366e}, // 1e-316 * 2**1177 + {0xc1069cd4eabe89f8, 0x999ec0bb696e840a}, // 1e-315 * 2**1174 + {0xf148440a256e2c76, 0xc00670ea43ca250d}, // 1e-314 * 2**1171 + {0x96cd2a865764dbca, 0x380406926a5e5728}, // 1e-313 * 2**1167 + {0xbc807527ed3e12bc, 0xc605083704f5ecf2}, // 1e-312 * 2**1164 + {0xeba09271e88d976b, 0xf7864a44c633682e}, // 1e-311 * 2**1161 + {0x93445b8731587ea3, 0x7ab3ee6afbe0211d}, // 1e-310 * 2**1157 + {0xb8157268fdae9e4c, 0x5960ea05bad82964}, // 1e-309 * 2**1154 + {0xe61acf033d1a45df, 0x6fb92487298e33bd}, // 1e-308 * 2**1151 + {0x8fd0c16206306bab, 0xa5d3b6d479f8e056}, // 1e-307 * 2**1147 + {0xb3c4f1ba87bc8696, 0x8f48a4899877186c}, // 1e-306 * 2**1144 + {0xe0b62e2929aba83c, 0x331acdabfe94de87}, // 1e-305 * 2**1141 + {0x8c71dcd9ba0b4925, 0x9ff0c08b7f1d0b14}, // 1e-304 * 2**1137 + {0xaf8e5410288e1b6f, 0x07ecf0ae5ee44dd9}, // 1e-303 * 2**1134 + {0xdb71e91432b1a24a, 0xc9e82cd9f69d6150}, // 1e-302 * 2**1131 + {0x892731ac9faf056e, 0xbe311c083a225cd2}, // 1e-301 * 2**1127 + {0xab70fe17c79ac6ca, 0x6dbd630a48aaf406}, // 1e-300 * 2**1124 + {0xd64d3d9db981787d, 0x092cbbccdad5b108}, // 1e-299 * 2**1121 + {0x85f0468293f0eb4e, 0x25bbf56008c58ea5}, // 1e-298 * 2**1117 + {0xa76c582338ed2621, 0xaf2af2b80af6f24e}, // 1e-297 * 2**1114 + {0xd1476e2c07286faa, 0x1af5af660db4aee1}, // 1e-296 * 2**1111 + {0x82cca4db847945ca, 0x50d98d9fc890ed4d}, // 1e-295 * 2**1107 + {0xa37fce126597973c, 0xe50ff107bab528a0}, // 1e-294 * 2**1104 + 
{0xcc5fc196fefd7d0c, 0x1e53ed49a96272c8}, // 1e-293 * 2**1101 + {0xff77b1fcbebcdc4f, 0x25e8e89c13bb0f7a}, // 1e-292 * 2**1098 + {0x9faacf3df73609b1, 0x77b191618c54e9ac}, // 1e-291 * 2**1094 + {0xc795830d75038c1d, 0xd59df5b9ef6a2417}, // 1e-290 * 2**1091 + {0xf97ae3d0d2446f25, 0x4b0573286b44ad1d}, // 1e-289 * 2**1088 + {0x9becce62836ac577, 0x4ee367f9430aec32}, // 1e-288 * 2**1084 + {0xc2e801fb244576d5, 0x229c41f793cda73f}, // 1e-287 * 2**1081 + {0xf3a20279ed56d48a, 0x6b43527578c1110f}, // 1e-286 * 2**1078 + {0x9845418c345644d6, 0x830a13896b78aaa9}, // 1e-285 * 2**1074 + {0xbe5691ef416bd60c, 0x23cc986bc656d553}, // 1e-284 * 2**1071 + {0xedec366b11c6cb8f, 0x2cbfbe86b7ec8aa8}, // 1e-283 * 2**1068 + {0x94b3a202eb1c3f39, 0x7bf7d71432f3d6a9}, // 1e-282 * 2**1064 + {0xb9e08a83a5e34f07, 0xdaf5ccd93fb0cc53}, // 1e-281 * 2**1061 + {0xe858ad248f5c22c9, 0xd1b3400f8f9cff68}, // 1e-280 * 2**1058 + {0x91376c36d99995be, 0x23100809b9c21fa1}, // 1e-279 * 2**1054 + {0xb58547448ffffb2d, 0xabd40a0c2832a78a}, // 1e-278 * 2**1051 + {0xe2e69915b3fff9f9, 0x16c90c8f323f516c}, // 1e-277 * 2**1048 + {0x8dd01fad907ffc3b, 0xae3da7d97f6792e3}, // 1e-276 * 2**1044 + {0xb1442798f49ffb4a, 0x99cd11cfdf41779c}, // 1e-275 * 2**1041 + {0xdd95317f31c7fa1d, 0x40405643d711d583}, // 1e-274 * 2**1038 + {0x8a7d3eef7f1cfc52, 0x482835ea666b2572}, // 1e-273 * 2**1034 + {0xad1c8eab5ee43b66, 0xda3243650005eecf}, // 1e-272 * 2**1031 + {0xd863b256369d4a40, 0x90bed43e40076a82}, // 1e-271 * 2**1028 + {0x873e4f75e2224e68, 0x5a7744a6e804a291}, // 1e-270 * 2**1024 + {0xa90de3535aaae202, 0x711515d0a205cb36}, // 1e-269 * 2**1021 + {0xd3515c2831559a83, 0x0d5a5b44ca873e03}, // 1e-268 * 2**1018 + {0x8412d9991ed58091, 0xe858790afe9486c2}, // 1e-267 * 2**1014 + {0xa5178fff668ae0b6, 0x626e974dbe39a872}, // 1e-266 * 2**1011 + {0xce5d73ff402d98e3, 0xfb0a3d212dc8128f}, // 1e-265 * 2**1008 + {0x80fa687f881c7f8e, 0x7ce66634bc9d0b99}, // 1e-264 * 2**1004 + {0xa139029f6a239f72, 0x1c1fffc1ebc44e80}, // 1e-263 * 2**1001 + {0xc987434744ac874e, 0xa327ffb266b56220}, // 1e-262 * 2**998 + {0xfbe9141915d7a922, 0x4bf1ff9f0062baa8}, // 1e-261 * 2**995 + {0x9d71ac8fada6c9b5, 0x6f773fc3603db4a9}, // 1e-260 * 2**991 + {0xc4ce17b399107c22, 0xcb550fb4384d21d3}, // 1e-259 * 2**988 + {0xf6019da07f549b2b, 0x7e2a53a146606a48}, // 1e-258 * 2**985 + {0x99c102844f94e0fb, 0x2eda7444cbfc426d}, // 1e-257 * 2**981 + {0xc0314325637a1939, 0xfa911155fefb5308}, // 1e-256 * 2**978 + {0xf03d93eebc589f88, 0x793555ab7eba27ca}, // 1e-255 * 2**975 + {0x96267c7535b763b5, 0x4bc1558b2f3458de}, // 1e-254 * 2**971 + {0xbbb01b9283253ca2, 0x9eb1aaedfb016f16}, // 1e-253 * 2**968 + {0xea9c227723ee8bcb, 0x465e15a979c1cadc}, // 1e-252 * 2**965 + {0x92a1958a7675175f, 0x0bfacd89ec191ec9}, // 1e-251 * 2**961 + {0xb749faed14125d36, 0xcef980ec671f667b}, // 1e-250 * 2**958 + {0xe51c79a85916f484, 0x82b7e12780e7401a}, // 1e-249 * 2**955 + {0x8f31cc0937ae58d2, 0xd1b2ecb8b0908810}, // 1e-248 * 2**951 + {0xb2fe3f0b8599ef07, 0x861fa7e6dcb4aa15}, // 1e-247 * 2**948 + {0xdfbdcece67006ac9, 0x67a791e093e1d49a}, // 1e-246 * 2**945 + {0x8bd6a141006042bd, 0xe0c8bb2c5c6d24e0}, // 1e-245 * 2**941 + {0xaecc49914078536d, 0x58fae9f773886e18}, // 1e-244 * 2**938 + {0xda7f5bf590966848, 0xaf39a475506a899e}, // 1e-243 * 2**935 + {0x888f99797a5e012d, 0x6d8406c952429603}, // 1e-242 * 2**931 + {0xaab37fd7d8f58178, 0xc8e5087ba6d33b83}, // 1e-241 * 2**928 + {0xd5605fcdcf32e1d6, 0xfb1e4a9a90880a64}, // 1e-240 * 2**925 + {0x855c3be0a17fcd26, 0x5cf2eea09a55067f}, // 1e-239 * 2**921 + {0xa6b34ad8c9dfc06f, 0xf42faa48c0ea481e}, // 1e-238 * 
2**918 + {0xd0601d8efc57b08b, 0xf13b94daf124da26}, // 1e-237 * 2**915 + {0x823c12795db6ce57, 0x76c53d08d6b70858}, // 1e-236 * 2**911 + {0xa2cb1717b52481ed, 0x54768c4b0c64ca6e}, // 1e-235 * 2**908 + {0xcb7ddcdda26da268, 0xa9942f5dcf7dfd09}, // 1e-234 * 2**905 + {0xfe5d54150b090b02, 0xd3f93b35435d7c4c}, // 1e-233 * 2**902 + {0x9efa548d26e5a6e1, 0xc47bc5014a1a6daf}, // 1e-232 * 2**898 + {0xc6b8e9b0709f109a, 0x359ab6419ca1091b}, // 1e-231 * 2**895 + {0xf867241c8cc6d4c0, 0xc30163d203c94b62}, // 1e-230 * 2**892 + {0x9b407691d7fc44f8, 0x79e0de63425dcf1d}, // 1e-229 * 2**888 + {0xc21094364dfb5636, 0x985915fc12f542e4}, // 1e-228 * 2**885 + {0xf294b943e17a2bc4, 0x3e6f5b7b17b2939d}, // 1e-227 * 2**882 + {0x979cf3ca6cec5b5a, 0xa705992ceecf9c42}, // 1e-226 * 2**878 + {0xbd8430bd08277231, 0x50c6ff782a838353}, // 1e-225 * 2**875 + {0xece53cec4a314ebd, 0xa4f8bf5635246428}, // 1e-224 * 2**872 + {0x940f4613ae5ed136, 0x871b7795e136be99}, // 1e-223 * 2**868 + {0xb913179899f68584, 0x28e2557b59846e3f}, // 1e-222 * 2**865 + {0xe757dd7ec07426e5, 0x331aeada2fe589cf}, // 1e-221 * 2**862 + {0x9096ea6f3848984f, 0x3ff0d2c85def7621}, // 1e-220 * 2**858 + {0xb4bca50b065abe63, 0x0fed077a756b53a9}, // 1e-219 * 2**855 + {0xe1ebce4dc7f16dfb, 0xd3e8495912c62894}, // 1e-218 * 2**852 + {0x8d3360f09cf6e4bd, 0x64712dd7abbbd95c}, // 1e-217 * 2**848 + {0xb080392cc4349dec, 0xbd8d794d96aacfb3}, // 1e-216 * 2**845 + {0xdca04777f541c567, 0xecf0d7a0fc5583a0}, // 1e-215 * 2**842 + {0x89e42caaf9491b60, 0xf41686c49db57244}, // 1e-214 * 2**838 + {0xac5d37d5b79b6239, 0x311c2875c522ced5}, // 1e-213 * 2**835 + {0xd77485cb25823ac7, 0x7d633293366b828b}, // 1e-212 * 2**832 + {0x86a8d39ef77164bc, 0xae5dff9c02033197}, // 1e-211 * 2**828 + {0xa8530886b54dbdeb, 0xd9f57f830283fdfc}, // 1e-210 * 2**825 + {0xd267caa862a12d66, 0xd072df63c324fd7b}, // 1e-209 * 2**822 + {0x8380dea93da4bc60, 0x4247cb9e59f71e6d}, // 1e-208 * 2**818 + {0xa46116538d0deb78, 0x52d9be85f074e608}, // 1e-207 * 2**815 + {0xcd795be870516656, 0x67902e276c921f8b}, // 1e-206 * 2**812 + {0x806bd9714632dff6, 0x00ba1cd8a3db53b6}, // 1e-205 * 2**808 + {0xa086cfcd97bf97f3, 0x80e8a40eccd228a4}, // 1e-204 * 2**805 + {0xc8a883c0fdaf7df0, 0x6122cd128006b2cd}, // 1e-203 * 2**802 + {0xfad2a4b13d1b5d6c, 0x796b805720085f81}, // 1e-202 * 2**799 + {0x9cc3a6eec6311a63, 0xcbe3303674053bb0}, // 1e-201 * 2**795 + {0xc3f490aa77bd60fc, 0xbedbfc4411068a9c}, // 1e-200 * 2**792 + {0xf4f1b4d515acb93b, 0xee92fb5515482d44}, // 1e-199 * 2**789 + {0x991711052d8bf3c5, 0x751bdd152d4d1c4a}, // 1e-198 * 2**785 + {0xbf5cd54678eef0b6, 0xd262d45a78a0635d}, // 1e-197 * 2**782 + {0xef340a98172aace4, 0x86fb897116c87c34}, // 1e-196 * 2**779 + {0x9580869f0e7aac0e, 0xd45d35e6ae3d4da0}, // 1e-195 * 2**775 + {0xbae0a846d2195712, 0x8974836059cca109}, // 1e-194 * 2**772 + {0xe998d258869facd7, 0x2bd1a438703fc94b}, // 1e-193 * 2**769 + {0x91ff83775423cc06, 0x7b6306a34627ddcf}, // 1e-192 * 2**765 + {0xb67f6455292cbf08, 0x1a3bc84c17b1d542}, // 1e-191 * 2**762 + {0xe41f3d6a7377eeca, 0x20caba5f1d9e4a93}, // 1e-190 * 2**759 + {0x8e938662882af53e, 0x547eb47b7282ee9c}, // 1e-189 * 2**755 + {0xb23867fb2a35b28d, 0xe99e619a4f23aa43}, // 1e-188 * 2**752 + {0xdec681f9f4c31f31, 0x6405fa00e2ec94d4}, // 1e-187 * 2**749 + {0x8b3c113c38f9f37e, 0xde83bc408dd3dd04}, // 1e-186 * 2**745 + {0xae0b158b4738705e, 0x9624ab50b148d445}, // 1e-185 * 2**742 + {0xd98ddaee19068c76, 0x3badd624dd9b0957}, // 1e-184 * 2**739 + {0x87f8a8d4cfa417c9, 0xe54ca5d70a80e5d6}, // 1e-183 * 2**735 + {0xa9f6d30a038d1dbc, 0x5e9fcf4ccd211f4c}, // 1e-182 * 2**732 + 
{0xd47487cc8470652b, 0x7647c3200069671f}, // 1e-181 * 2**729 + {0x84c8d4dfd2c63f3b, 0x29ecd9f40041e073}, // 1e-180 * 2**725 + {0xa5fb0a17c777cf09, 0xf468107100525890}, // 1e-179 * 2**722 + {0xcf79cc9db955c2cc, 0x7182148d4066eeb4}, // 1e-178 * 2**719 + {0x81ac1fe293d599bf, 0xc6f14cd848405530}, // 1e-177 * 2**715 + {0xa21727db38cb002f, 0xb8ada00e5a506a7c}, // 1e-176 * 2**712 + {0xca9cf1d206fdc03b, 0xa6d90811f0e4851c}, // 1e-175 * 2**709 + {0xfd442e4688bd304a, 0x908f4a166d1da663}, // 1e-174 * 2**706 + {0x9e4a9cec15763e2e, 0x9a598e4e043287fe}, // 1e-173 * 2**702 + {0xc5dd44271ad3cdba, 0x40eff1e1853f29fd}, // 1e-172 * 2**699 + {0xf7549530e188c128, 0xd12bee59e68ef47c}, // 1e-171 * 2**696 + {0x9a94dd3e8cf578b9, 0x82bb74f8301958ce}, // 1e-170 * 2**692 + {0xc13a148e3032d6e7, 0xe36a52363c1faf01}, // 1e-169 * 2**689 + {0xf18899b1bc3f8ca1, 0xdc44e6c3cb279ac1}, // 1e-168 * 2**686 + {0x96f5600f15a7b7e5, 0x29ab103a5ef8c0b9}, // 1e-167 * 2**682 + {0xbcb2b812db11a5de, 0x7415d448f6b6f0e7}, // 1e-166 * 2**679 + {0xebdf661791d60f56, 0x111b495b3464ad21}, // 1e-165 * 2**676 + {0x936b9fcebb25c995, 0xcab10dd900beec34}, // 1e-164 * 2**672 + {0xb84687c269ef3bfb, 0x3d5d514f40eea742}, // 1e-163 * 2**669 + {0xe65829b3046b0afa, 0x0cb4a5a3112a5112}, // 1e-162 * 2**666 + {0x8ff71a0fe2c2e6dc, 0x47f0e785eaba72ab}, // 1e-161 * 2**662 + {0xb3f4e093db73a093, 0x59ed216765690f56}, // 1e-160 * 2**659 + {0xe0f218b8d25088b8, 0x306869c13ec3532c}, // 1e-159 * 2**656 + {0x8c974f7383725573, 0x1e414218c73a13fb}, // 1e-158 * 2**652 + {0xafbd2350644eeacf, 0xe5d1929ef90898fa}, // 1e-157 * 2**649 + {0xdbac6c247d62a583, 0xdf45f746b74abf39}, // 1e-156 * 2**646 + {0x894bc396ce5da772, 0x6b8bba8c328eb783}, // 1e-155 * 2**642 + {0xab9eb47c81f5114f, 0x066ea92f3f326564}, // 1e-154 * 2**639 + {0xd686619ba27255a2, 0xc80a537b0efefebd}, // 1e-153 * 2**636 + {0x8613fd0145877585, 0xbd06742ce95f5f36}, // 1e-152 * 2**632 + {0xa798fc4196e952e7, 0x2c48113823b73704}, // 1e-151 * 2**629 + {0xd17f3b51fca3a7a0, 0xf75a15862ca504c5}, // 1e-150 * 2**626 + {0x82ef85133de648c4, 0x9a984d73dbe722fb}, // 1e-149 * 2**622 + {0xa3ab66580d5fdaf5, 0xc13e60d0d2e0ebba}, // 1e-148 * 2**619 + {0xcc963fee10b7d1b3, 0x318df905079926a8}, // 1e-147 * 2**616 + {0xffbbcfe994e5c61f, 0xfdf17746497f7052}, // 1e-146 * 2**613 + {0x9fd561f1fd0f9bd3, 0xfeb6ea8bedefa633}, // 1e-145 * 2**609 + {0xc7caba6e7c5382c8, 0xfe64a52ee96b8fc0}, // 1e-144 * 2**606 + {0xf9bd690a1b68637b, 0x3dfdce7aa3c673b0}, // 1e-143 * 2**603 + {0x9c1661a651213e2d, 0x06bea10ca65c084e}, // 1e-142 * 2**599 + {0xc31bfa0fe5698db8, 0x486e494fcff30a62}, // 1e-141 * 2**596 + {0xf3e2f893dec3f126, 0x5a89dba3c3efccfa}, // 1e-140 * 2**593 + {0x986ddb5c6b3a76b7, 0xf89629465a75e01c}, // 1e-139 * 2**589 + {0xbe89523386091465, 0xf6bbb397f1135823}, // 1e-138 * 2**586 + {0xee2ba6c0678b597f, 0x746aa07ded582e2c}, // 1e-137 * 2**583 + {0x94db483840b717ef, 0xa8c2a44eb4571cdc}, // 1e-136 * 2**579 + {0xba121a4650e4ddeb, 0x92f34d62616ce413}, // 1e-135 * 2**576 + {0xe896a0d7e51e1566, 0x77b020baf9c81d17}, // 1e-134 * 2**573 + {0x915e2486ef32cd60, 0x0ace1474dc1d122e}, // 1e-133 * 2**569 + {0xb5b5ada8aaff80b8, 0x0d819992132456ba}, // 1e-132 * 2**566 + {0xe3231912d5bf60e6, 0x10e1fff697ed6c69}, // 1e-131 * 2**563 + {0x8df5efabc5979c8f, 0xca8d3ffa1ef463c1}, // 1e-130 * 2**559 + {0xb1736b96b6fd83b3, 0xbd308ff8a6b17cb2}, // 1e-129 * 2**556 + {0xddd0467c64bce4a0, 0xac7cb3f6d05ddbde}, // 1e-128 * 2**553 + {0x8aa22c0dbef60ee4, 0x6bcdf07a423aa96b}, // 1e-127 * 2**549 + {0xad4ab7112eb3929d, 0x86c16c98d2c953c6}, // 1e-126 * 2**546 + {0xd89d64d57a607744, 
0xe871c7bf077ba8b7}, // 1e-125 * 2**543 + {0x87625f056c7c4a8b, 0x11471cd764ad4972}, // 1e-124 * 2**539 + {0xa93af6c6c79b5d2d, 0xd598e40d3dd89bcf}, // 1e-123 * 2**536 + {0xd389b47879823479, 0x4aff1d108d4ec2c3}, // 1e-122 * 2**533 + {0x843610cb4bf160cb, 0xcedf722a585139ba}, // 1e-121 * 2**529 + {0xa54394fe1eedb8fe, 0xc2974eb4ee658828}, // 1e-120 * 2**526 + {0xce947a3da6a9273e, 0x733d226229feea32}, // 1e-119 * 2**523 + {0x811ccc668829b887, 0x0806357d5a3f525f}, // 1e-118 * 2**519 + {0xa163ff802a3426a8, 0xca07c2dcb0cf26f7}, // 1e-117 * 2**516 + {0xc9bcff6034c13052, 0xfc89b393dd02f0b5}, // 1e-116 * 2**513 + {0xfc2c3f3841f17c67, 0xbbac2078d443ace2}, // 1e-115 * 2**510 + {0x9d9ba7832936edc0, 0xd54b944b84aa4c0d}, // 1e-114 * 2**506 + {0xc5029163f384a931, 0x0a9e795e65d4df11}, // 1e-113 * 2**503 + {0xf64335bcf065d37d, 0x4d4617b5ff4a16d5}, // 1e-112 * 2**500 + {0x99ea0196163fa42e, 0x504bced1bf8e4e45}, // 1e-111 * 2**496 + {0xc06481fb9bcf8d39, 0xe45ec2862f71e1d6}, // 1e-110 * 2**493 + {0xf07da27a82c37088, 0x5d767327bb4e5a4c}, // 1e-109 * 2**490 + {0x964e858c91ba2655, 0x3a6a07f8d510f86f}, // 1e-108 * 2**486 + {0xbbe226efb628afea, 0x890489f70a55368b}, // 1e-107 * 2**483 + {0xeadab0aba3b2dbe5, 0x2b45ac74ccea842e}, // 1e-106 * 2**480 + {0x92c8ae6b464fc96f, 0x3b0b8bc90012929d}, // 1e-105 * 2**476 + {0xb77ada0617e3bbcb, 0x09ce6ebb40173744}, // 1e-104 * 2**473 + {0xe55990879ddcaabd, 0xcc420a6a101d0515}, // 1e-103 * 2**470 + {0x8f57fa54c2a9eab6, 0x9fa946824a12232d}, // 1e-102 * 2**466 + {0xb32df8e9f3546564, 0x47939822dc96abf9}, // 1e-101 * 2**463 + {0xdff9772470297ebd, 0x59787e2b93bc56f7}, // 1e-100 * 2**460 + {0x8bfbea76c619ef36, 0x57eb4edb3c55b65a}, // 1e-99 * 2**456 + {0xaefae51477a06b03, 0xede622920b6b23f1}, // 1e-98 * 2**453 + {0xdab99e59958885c4, 0xe95fab368e45eced}, // 1e-97 * 2**450 + {0x88b402f7fd75539b, 0x11dbcb0218ebb414}, // 1e-96 * 2**446 + {0xaae103b5fcd2a881, 0xd652bdc29f26a119}, // 1e-95 * 2**443 + {0xd59944a37c0752a2, 0x4be76d3346f0495f}, // 1e-94 * 2**440 + {0x857fcae62d8493a5, 0x6f70a4400c562ddb}, // 1e-93 * 2**436 + {0xa6dfbd9fb8e5b88e, 0xcb4ccd500f6bb952}, // 1e-92 * 2**433 + {0xd097ad07a71f26b2, 0x7e2000a41346a7a7}, // 1e-91 * 2**430 + {0x825ecc24c873782f, 0x8ed400668c0c28c8}, // 1e-90 * 2**426 + {0xa2f67f2dfa90563b, 0x728900802f0f32fa}, // 1e-89 * 2**423 + {0xcbb41ef979346bca, 0x4f2b40a03ad2ffb9}, // 1e-88 * 2**420 + {0xfea126b7d78186bc, 0xe2f610c84987bfa8}, // 1e-87 * 2**417 + {0x9f24b832e6b0f436, 0x0dd9ca7d2df4d7c9}, // 1e-86 * 2**413 + {0xc6ede63fa05d3143, 0x91503d1c79720dbb}, // 1e-85 * 2**410 + {0xf8a95fcf88747d94, 0x75a44c6397ce912a}, // 1e-84 * 2**407 + {0x9b69dbe1b548ce7c, 0xc986afbe3ee11aba}, // 1e-83 * 2**403 + {0xc24452da229b021b, 0xfbe85badce996168}, // 1e-82 * 2**400 + {0xf2d56790ab41c2a2, 0xfae27299423fb9c3}, // 1e-81 * 2**397 + {0x97c560ba6b0919a5, 0xdccd879fc967d41a}, // 1e-80 * 2**393 + {0xbdb6b8e905cb600f, 0x5400e987bbc1c920}, // 1e-79 * 2**390 + {0xed246723473e3813, 0x290123e9aab23b68}, // 1e-78 * 2**387 + {0x9436c0760c86e30b, 0xf9a0b6720aaf6521}, // 1e-77 * 2**383 + {0xb94470938fa89bce, 0xf808e40e8d5b3e69}, // 1e-76 * 2**380 + {0xe7958cb87392c2c2, 0xb60b1d1230b20e04}, // 1e-75 * 2**377 + {0x90bd77f3483bb9b9, 0xb1c6f22b5e6f48c2}, // 1e-74 * 2**373 + {0xb4ecd5f01a4aa828, 0x1e38aeb6360b1af3}, // 1e-73 * 2**370 + {0xe2280b6c20dd5232, 0x25c6da63c38de1b0}, // 1e-72 * 2**367 + {0x8d590723948a535f, 0x579c487e5a38ad0e}, // 1e-71 * 2**363 + {0xb0af48ec79ace837, 0x2d835a9df0c6d851}, // 1e-70 * 2**360 + {0xdcdb1b2798182244, 0xf8e431456cf88e65}, // 1e-69 * 2**357 + 
{0x8a08f0f8bf0f156b, 0x1b8e9ecb641b58ff}, // 1e-68 * 2**353 + {0xac8b2d36eed2dac5, 0xe272467e3d222f3f}, // 1e-67 * 2**350 + {0xd7adf884aa879177, 0x5b0ed81dcc6abb0f}, // 1e-66 * 2**347 + {0x86ccbb52ea94baea, 0x98e947129fc2b4e9}, // 1e-65 * 2**343 + {0xa87fea27a539e9a5, 0x3f2398d747b36224}, // 1e-64 * 2**340 + {0xd29fe4b18e88640e, 0x8eec7f0d19a03aad}, // 1e-63 * 2**337 + {0x83a3eeeef9153e89, 0x1953cf68300424ac}, // 1e-62 * 2**333 + {0xa48ceaaab75a8e2b, 0x5fa8c3423c052dd7}, // 1e-61 * 2**330 + {0xcdb02555653131b6, 0x3792f412cb06794d}, // 1e-60 * 2**327 + {0x808e17555f3ebf11, 0xe2bbd88bbee40bd0}, // 1e-59 * 2**323 + {0xa0b19d2ab70e6ed6, 0x5b6aceaeae9d0ec4}, // 1e-58 * 2**320 + {0xc8de047564d20a8b, 0xf245825a5a445275}, // 1e-57 * 2**317 + {0xfb158592be068d2e, 0xeed6e2f0f0d56712}, // 1e-56 * 2**314 + {0x9ced737bb6c4183d, 0x55464dd69685606b}, // 1e-55 * 2**310 + {0xc428d05aa4751e4c, 0xaa97e14c3c26b886}, // 1e-54 * 2**307 + {0xf53304714d9265df, 0xd53dd99f4b3066a8}, // 1e-53 * 2**304 + {0x993fe2c6d07b7fab, 0xe546a8038efe4029}, // 1e-52 * 2**300 + {0xbf8fdb78849a5f96, 0xde98520472bdd033}, // 1e-51 * 2**297 + {0xef73d256a5c0f77c, 0x963e66858f6d4440}, // 1e-50 * 2**294 + {0x95a8637627989aad, 0xdde7001379a44aa8}, // 1e-49 * 2**290 + {0xbb127c53b17ec159, 0x5560c018580d5d52}, // 1e-48 * 2**287 + {0xe9d71b689dde71af, 0xaab8f01e6e10b4a6}, // 1e-47 * 2**284 + {0x9226712162ab070d, 0xcab3961304ca70e8}, // 1e-46 * 2**280 + {0xb6b00d69bb55c8d1, 0x3d607b97c5fd0d22}, // 1e-45 * 2**277 + {0xe45c10c42a2b3b05, 0x8cb89a7db77c506a}, // 1e-44 * 2**274 + {0x8eb98a7a9a5b04e3, 0x77f3608e92adb242}, // 1e-43 * 2**270 + {0xb267ed1940f1c61c, 0x55f038b237591ed3}, // 1e-42 * 2**267 + {0xdf01e85f912e37a3, 0x6b6c46dec52f6688}, // 1e-41 * 2**264 + {0x8b61313bbabce2c6, 0x2323ac4b3b3da015}, // 1e-40 * 2**260 + {0xae397d8aa96c1b77, 0xabec975e0a0d081a}, // 1e-39 * 2**257 + {0xd9c7dced53c72255, 0x96e7bd358c904a21}, // 1e-38 * 2**254 + {0x881cea14545c7575, 0x7e50d64177da2e54}, // 1e-37 * 2**250 + {0xaa242499697392d2, 0xdde50bd1d5d0b9e9}, // 1e-36 * 2**247 + {0xd4ad2dbfc3d07787, 0x955e4ec64b44e864}, // 1e-35 * 2**244 + {0x84ec3c97da624ab4, 0xbd5af13bef0b113e}, // 1e-34 * 2**240 + {0xa6274bbdd0fadd61, 0xecb1ad8aeacdd58e}, // 1e-33 * 2**237 + {0xcfb11ead453994ba, 0x67de18eda5814af2}, // 1e-32 * 2**234 + {0x81ceb32c4b43fcf4, 0x80eacf948770ced7}, // 1e-31 * 2**230 + {0xa2425ff75e14fc31, 0xa1258379a94d028d}, // 1e-30 * 2**227 + {0xcad2f7f5359a3b3e, 0x096ee45813a04330}, // 1e-29 * 2**224 + {0xfd87b5f28300ca0d, 0x8bca9d6e188853fc}, // 1e-28 * 2**221 + {0x9e74d1b791e07e48, 0x775ea264cf55347d}, // 1e-27 * 2**217 + {0xc612062576589dda, 0x95364afe032a819d}, // 1e-26 * 2**214 + {0xf79687aed3eec551, 0x3a83ddbd83f52204}, // 1e-25 * 2**211 + {0x9abe14cd44753b52, 0xc4926a9672793542}, // 1e-24 * 2**207 + {0xc16d9a0095928a27, 0x75b7053c0f178293}, // 1e-23 * 2**204 + {0xf1c90080baf72cb1, 0x5324c68b12dd6338}, // 1e-22 * 2**201 + {0x971da05074da7bee, 0xd3f6fc16ebca5e03}, // 1e-21 * 2**197 + {0xbce5086492111aea, 0x88f4bb1ca6bcf584}, // 1e-20 * 2**194 + {0xec1e4a7db69561a5, 0x2b31e9e3d06c32e5}, // 1e-19 * 2**191 + {0x9392ee8e921d5d07, 0x3aff322e62439fcf}, // 1e-18 * 2**187 + {0xb877aa3236a4b449, 0x09befeb9fad487c2}, // 1e-17 * 2**184 + {0xe69594bec44de15b, 0x4c2ebe687989a9b3}, // 1e-16 * 2**181 + {0x901d7cf73ab0acd9, 0x0f9d37014bf60a10}, // 1e-15 * 2**177 + {0xb424dc35095cd80f, 0x538484c19ef38c94}, // 1e-14 * 2**174 + {0xe12e13424bb40e13, 0x2865a5f206b06fb9}, // 1e-13 * 2**171 + {0x8cbccc096f5088cb, 0xf93f87b7442e45d3}, // 1e-12 * 2**167 + {0xafebff0bcb24aafe, 
0xf78f69a51539d748}, // 1e-11 * 2**164 + {0xdbe6fecebdedd5be, 0xb573440e5a884d1b}, // 1e-10 * 2**161 + {0x89705f4136b4a597, 0x31680a88f8953030}, // 1e-9 * 2**157 + {0xabcc77118461cefc, 0xfdc20d2b36ba7c3d}, // 1e-8 * 2**154 + {0xd6bf94d5e57a42bc, 0x3d32907604691b4c}, // 1e-7 * 2**151 + {0x8637bd05af6c69b5, 0xa63f9a49c2c1b10f}, // 1e-6 * 2**147 + {0xa7c5ac471b478423, 0x0fcf80dc33721d53}, // 1e-5 * 2**144 + {0xd1b71758e219652b, 0xd3c36113404ea4a8}, // 1e-4 * 2**141 + {0x83126e978d4fdf3b, 0x645a1cac083126e9}, // 1e-3 * 2**137 + {0xa3d70a3d70a3d70a, 0x3d70a3d70a3d70a3}, // 1e-2 * 2**134 + {0xcccccccccccccccc, 0xcccccccccccccccc}, // 1e-1 * 2**131 + {0x8000000000000000, 0x0000000000000000}, // 1e0 * 2**127 + {0xa000000000000000, 0x0000000000000000}, // 1e1 * 2**124 + {0xc800000000000000, 0x0000000000000000}, // 1e2 * 2**121 + {0xfa00000000000000, 0x0000000000000000}, // 1e3 * 2**118 + {0x9c40000000000000, 0x0000000000000000}, // 1e4 * 2**114 + {0xc350000000000000, 0x0000000000000000}, // 1e5 * 2**111 + {0xf424000000000000, 0x0000000000000000}, // 1e6 * 2**108 + {0x9896800000000000, 0x0000000000000000}, // 1e7 * 2**104 + {0xbebc200000000000, 0x0000000000000000}, // 1e8 * 2**101 + {0xee6b280000000000, 0x0000000000000000}, // 1e9 * 2**98 + {0x9502f90000000000, 0x0000000000000000}, // 1e10 * 2**94 + {0xba43b74000000000, 0x0000000000000000}, // 1e11 * 2**91 + {0xe8d4a51000000000, 0x0000000000000000}, // 1e12 * 2**88 + {0x9184e72a00000000, 0x0000000000000000}, // 1e13 * 2**84 + {0xb5e620f480000000, 0x0000000000000000}, // 1e14 * 2**81 + {0xe35fa931a0000000, 0x0000000000000000}, // 1e15 * 2**78 + {0x8e1bc9bf04000000, 0x0000000000000000}, // 1e16 * 2**74 + {0xb1a2bc2ec5000000, 0x0000000000000000}, // 1e17 * 2**71 + {0xde0b6b3a76400000, 0x0000000000000000}, // 1e18 * 2**68 + {0x8ac7230489e80000, 0x0000000000000000}, // 1e19 * 2**64 + {0xad78ebc5ac620000, 0x0000000000000000}, // 1e20 * 2**61 + {0xd8d726b7177a8000, 0x0000000000000000}, // 1e21 * 2**58 + {0x878678326eac9000, 0x0000000000000000}, // 1e22 * 2**54 + {0xa968163f0a57b400, 0x0000000000000000}, // 1e23 * 2**51 + {0xd3c21bcecceda100, 0x0000000000000000}, // 1e24 * 2**48 + {0x84595161401484a0, 0x0000000000000000}, // 1e25 * 2**44 + {0xa56fa5b99019a5c8, 0x0000000000000000}, // 1e26 * 2**41 + {0xcecb8f27f4200f3a, 0x0000000000000000}, // 1e27 * 2**38 + {0x813f3978f8940984, 0x4000000000000000}, // 1e28 * 2**34 + {0xa18f07d736b90be5, 0x5000000000000000}, // 1e29 * 2**31 + {0xc9f2c9cd04674ede, 0xa400000000000000}, // 1e30 * 2**28 + {0xfc6f7c4045812296, 0x4d00000000000000}, // 1e31 * 2**25 + {0x9dc5ada82b70b59d, 0xf020000000000000}, // 1e32 * 2**21 + {0xc5371912364ce305, 0x6c28000000000000}, // 1e33 * 2**18 + {0xf684df56c3e01bc6, 0xc732000000000000}, // 1e34 * 2**15 + {0x9a130b963a6c115c, 0x3c7f400000000000}, // 1e35 * 2**11 + {0xc097ce7bc90715b3, 0x4b9f100000000000}, // 1e36 * 2**8 + {0xf0bdc21abb48db20, 0x1e86d40000000000}, // 1e37 * 2**5 + {0x96769950b50d88f4, 0x1314448000000000}, // 1e38 * 2**1 + {0xbc143fa4e250eb31, 0x17d955a000000000}, // 1e39 * 2**-2 + {0xeb194f8e1ae525fd, 0x5dcfab0800000000}, // 1e40 * 2**-5 + {0x92efd1b8d0cf37be, 0x5aa1cae500000000}, // 1e41 * 2**-9 + {0xb7abc627050305ad, 0xf14a3d9e40000000}, // 1e42 * 2**-12 + {0xe596b7b0c643c719, 0x6d9ccd05d0000000}, // 1e43 * 2**-15 + {0x8f7e32ce7bea5c6f, 0xe4820023a2000000}, // 1e44 * 2**-19 + {0xb35dbf821ae4f38b, 0xdda2802c8a800000}, // 1e45 * 2**-22 + {0xe0352f62a19e306e, 0xd50b2037ad200000}, // 1e46 * 2**-25 + {0x8c213d9da502de45, 0x4526f422cc340000}, // 1e47 * 2**-29 + {0xaf298d050e4395d6, 
0x9670b12b7f410000}, // 1e48 * 2**-32 + {0xdaf3f04651d47b4c, 0x3c0cdd765f114000}, // 1e49 * 2**-35 + {0x88d8762bf324cd0f, 0xa5880a69fb6ac800}, // 1e50 * 2**-39 + {0xab0e93b6efee0053, 0x8eea0d047a457a00}, // 1e51 * 2**-42 + {0xd5d238a4abe98068, 0x72a4904598d6d880}, // 1e52 * 2**-45 + {0x85a36366eb71f041, 0x47a6da2b7f864750}, // 1e53 * 2**-49 + {0xa70c3c40a64e6c51, 0x999090b65f67d924}, // 1e54 * 2**-52 + {0xd0cf4b50cfe20765, 0xfff4b4e3f741cf6d}, // 1e55 * 2**-55 + {0x82818f1281ed449f, 0xbff8f10e7a8921a4}, // 1e56 * 2**-59 + {0xa321f2d7226895c7, 0xaff72d52192b6a0d}, // 1e57 * 2**-62 + {0xcbea6f8ceb02bb39, 0x9bf4f8a69f764490}, // 1e58 * 2**-65 + {0xfee50b7025c36a08, 0x02f236d04753d5b4}, // 1e59 * 2**-68 + {0x9f4f2726179a2245, 0x01d762422c946590}, // 1e60 * 2**-72 + {0xc722f0ef9d80aad6, 0x424d3ad2b7b97ef5}, // 1e61 * 2**-75 + {0xf8ebad2b84e0d58b, 0xd2e0898765a7deb2}, // 1e62 * 2**-78 + {0x9b934c3b330c8577, 0x63cc55f49f88eb2f}, // 1e63 * 2**-82 + {0xc2781f49ffcfa6d5, 0x3cbf6b71c76b25fb}, // 1e64 * 2**-85 + {0xf316271c7fc3908a, 0x8bef464e3945ef7a}, // 1e65 * 2**-88 + {0x97edd871cfda3a56, 0x97758bf0e3cbb5ac}, // 1e66 * 2**-92 + {0xbde94e8e43d0c8ec, 0x3d52eeed1cbea317}, // 1e67 * 2**-95 + {0xed63a231d4c4fb27, 0x4ca7aaa863ee4bdd}, // 1e68 * 2**-98 + {0x945e455f24fb1cf8, 0x8fe8caa93e74ef6a}, // 1e69 * 2**-102 + {0xb975d6b6ee39e436, 0xb3e2fd538e122b44}, // 1e70 * 2**-105 + {0xe7d34c64a9c85d44, 0x60dbbca87196b616}, // 1e71 * 2**-108 + {0x90e40fbeea1d3a4a, 0xbc8955e946fe31cd}, // 1e72 * 2**-112 + {0xb51d13aea4a488dd, 0x6babab6398bdbe41}, // 1e73 * 2**-115 + {0xe264589a4dcdab14, 0xc696963c7eed2dd1}, // 1e74 * 2**-118 + {0x8d7eb76070a08aec, 0xfc1e1de5cf543ca2}, // 1e75 * 2**-122 + {0xb0de65388cc8ada8, 0x3b25a55f43294bcb}, // 1e76 * 2**-125 + {0xdd15fe86affad912, 0x49ef0eb713f39ebe}, // 1e77 * 2**-128 + {0x8a2dbf142dfcc7ab, 0x6e3569326c784337}, // 1e78 * 2**-132 + {0xacb92ed9397bf996, 0x49c2c37f07965404}, // 1e79 * 2**-135 + {0xd7e77a8f87daf7fb, 0xdc33745ec97be906}, // 1e80 * 2**-138 + {0x86f0ac99b4e8dafd, 0x69a028bb3ded71a3}, // 1e81 * 2**-142 + {0xa8acd7c0222311bc, 0xc40832ea0d68ce0c}, // 1e82 * 2**-145 + {0xd2d80db02aabd62b, 0xf50a3fa490c30190}, // 1e83 * 2**-148 + {0x83c7088e1aab65db, 0x792667c6da79e0fa}, // 1e84 * 2**-152 + {0xa4b8cab1a1563f52, 0x577001b891185938}, // 1e85 * 2**-155 + {0xcde6fd5e09abcf26, 0xed4c0226b55e6f86}, // 1e86 * 2**-158 + {0x80b05e5ac60b6178, 0x544f8158315b05b4}, // 1e87 * 2**-162 + {0xa0dc75f1778e39d6, 0x696361ae3db1c721}, // 1e88 * 2**-165 + {0xc913936dd571c84c, 0x03bc3a19cd1e38e9}, // 1e89 * 2**-168 + {0xfb5878494ace3a5f, 0x04ab48a04065c723}, // 1e90 * 2**-171 + {0x9d174b2dcec0e47b, 0x62eb0d64283f9c76}, // 1e91 * 2**-175 + {0xc45d1df942711d9a, 0x3ba5d0bd324f8394}, // 1e92 * 2**-178 + {0xf5746577930d6500, 0xca8f44ec7ee36479}, // 1e93 * 2**-181 + {0x9968bf6abbe85f20, 0x7e998b13cf4e1ecb}, // 1e94 * 2**-185 + {0xbfc2ef456ae276e8, 0x9e3fedd8c321a67e}, // 1e95 * 2**-188 + {0xefb3ab16c59b14a2, 0xc5cfe94ef3ea101e}, // 1e96 * 2**-191 + {0x95d04aee3b80ece5, 0xbba1f1d158724a12}, // 1e97 * 2**-195 + {0xbb445da9ca61281f, 0x2a8a6e45ae8edc97}, // 1e98 * 2**-198 + {0xea1575143cf97226, 0xf52d09d71a3293bd}, // 1e99 * 2**-201 + {0x924d692ca61be758, 0x593c2626705f9c56}, // 1e100 * 2**-205 + {0xb6e0c377cfa2e12e, 0x6f8b2fb00c77836c}, // 1e101 * 2**-208 + {0xe498f455c38b997a, 0x0b6dfb9c0f956447}, // 1e102 * 2**-211 + {0x8edf98b59a373fec, 0x4724bd4189bd5eac}, // 1e103 * 2**-215 + {0xb2977ee300c50fe7, 0x58edec91ec2cb657}, // 1e104 * 2**-218 + {0xdf3d5e9bc0f653e1, 0x2f2967b66737e3ed}, // 1e105 * 
2**-221 + {0x8b865b215899f46c, 0xbd79e0d20082ee74}, // 1e106 * 2**-225 + {0xae67f1e9aec07187, 0xecd8590680a3aa11}, // 1e107 * 2**-228 + {0xda01ee641a708de9, 0xe80e6f4820cc9495}, // 1e108 * 2**-231 + {0x884134fe908658b2, 0x3109058d147fdcdd}, // 1e109 * 2**-235 + {0xaa51823e34a7eede, 0xbd4b46f0599fd415}, // 1e110 * 2**-238 + {0xd4e5e2cdc1d1ea96, 0x6c9e18ac7007c91a}, // 1e111 * 2**-241 + {0x850fadc09923329e, 0x03e2cf6bc604ddb0}, // 1e112 * 2**-245 + {0xa6539930bf6bff45, 0x84db8346b786151c}, // 1e113 * 2**-248 + {0xcfe87f7cef46ff16, 0xe612641865679a63}, // 1e114 * 2**-251 + {0x81f14fae158c5f6e, 0x4fcb7e8f3f60c07e}, // 1e115 * 2**-255 + {0xa26da3999aef7749, 0xe3be5e330f38f09d}, // 1e116 * 2**-258 + {0xcb090c8001ab551c, 0x5cadf5bfd3072cc5}, // 1e117 * 2**-261 + {0xfdcb4fa002162a63, 0x73d9732fc7c8f7f6}, // 1e118 * 2**-264 + {0x9e9f11c4014dda7e, 0x2867e7fddcdd9afa}, // 1e119 * 2**-268 + {0xc646d63501a1511d, 0xb281e1fd541501b8}, // 1e120 * 2**-271 + {0xf7d88bc24209a565, 0x1f225a7ca91a4226}, // 1e121 * 2**-274 + {0x9ae757596946075f, 0x3375788de9b06958}, // 1e122 * 2**-278 + {0xc1a12d2fc3978937, 0x0052d6b1641c83ae}, // 1e123 * 2**-281 + {0xf209787bb47d6b84, 0xc0678c5dbd23a49a}, // 1e124 * 2**-284 + {0x9745eb4d50ce6332, 0xf840b7ba963646e0}, // 1e125 * 2**-288 + {0xbd176620a501fbff, 0xb650e5a93bc3d898}, // 1e126 * 2**-291 + {0xec5d3fa8ce427aff, 0xa3e51f138ab4cebe}, // 1e127 * 2**-294 + {0x93ba47c980e98cdf, 0xc66f336c36b10137}, // 1e128 * 2**-298 + {0xb8a8d9bbe123f017, 0xb80b0047445d4184}, // 1e129 * 2**-301 + {0xe6d3102ad96cec1d, 0xa60dc059157491e5}, // 1e130 * 2**-304 + {0x9043ea1ac7e41392, 0x87c89837ad68db2f}, // 1e131 * 2**-308 + {0xb454e4a179dd1877, 0x29babe4598c311fb}, // 1e132 * 2**-311 + {0xe16a1dc9d8545e94, 0xf4296dd6fef3d67a}, // 1e133 * 2**-314 + {0x8ce2529e2734bb1d, 0x1899e4a65f58660c}, // 1e134 * 2**-318 + {0xb01ae745b101e9e4, 0x5ec05dcff72e7f8f}, // 1e135 * 2**-321 + {0xdc21a1171d42645d, 0x76707543f4fa1f73}, // 1e136 * 2**-324 + {0x899504ae72497eba, 0x6a06494a791c53a8}, // 1e137 * 2**-328 + {0xabfa45da0edbde69, 0x0487db9d17636892}, // 1e138 * 2**-331 + {0xd6f8d7509292d603, 0x45a9d2845d3c42b6}, // 1e139 * 2**-334 + {0x865b86925b9bc5c2, 0x0b8a2392ba45a9b2}, // 1e140 * 2**-338 + {0xa7f26836f282b732, 0x8e6cac7768d7141e}, // 1e141 * 2**-341 + {0xd1ef0244af2364ff, 0x3207d795430cd926}, // 1e142 * 2**-344 + {0x8335616aed761f1f, 0x7f44e6bd49e807b8}, // 1e143 * 2**-348 + {0xa402b9c5a8d3a6e7, 0x5f16206c9c6209a6}, // 1e144 * 2**-351 + {0xcd036837130890a1, 0x36dba887c37a8c0f}, // 1e145 * 2**-354 + {0x802221226be55a64, 0xc2494954da2c9789}, // 1e146 * 2**-358 + {0xa02aa96b06deb0fd, 0xf2db9baa10b7bd6c}, // 1e147 * 2**-361 + {0xc83553c5c8965d3d, 0x6f92829494e5acc7}, // 1e148 * 2**-364 + {0xfa42a8b73abbf48c, 0xcb772339ba1f17f9}, // 1e149 * 2**-367 + {0x9c69a97284b578d7, 0xff2a760414536efb}, // 1e150 * 2**-371 + {0xc38413cf25e2d70d, 0xfef5138519684aba}, // 1e151 * 2**-374 + {0xf46518c2ef5b8cd1, 0x7eb258665fc25d69}, // 1e152 * 2**-377 + {0x98bf2f79d5993802, 0xef2f773ffbd97a61}, // 1e153 * 2**-381 + {0xbeeefb584aff8603, 0xaafb550ffacfd8fa}, // 1e154 * 2**-384 + {0xeeaaba2e5dbf6784, 0x95ba2a53f983cf38}, // 1e155 * 2**-387 + {0x952ab45cfa97a0b2, 0xdd945a747bf26183}, // 1e156 * 2**-391 + {0xba756174393d88df, 0x94f971119aeef9e4}, // 1e157 * 2**-394 + {0xe912b9d1478ceb17, 0x7a37cd5601aab85d}, // 1e158 * 2**-397 + {0x91abb422ccb812ee, 0xac62e055c10ab33a}, // 1e159 * 2**-401 + {0xb616a12b7fe617aa, 0x577b986b314d6009}, // 1e160 * 2**-404 + {0xe39c49765fdf9d94, 0xed5a7e85fda0b80b}, // 1e161 * 2**-407 + 
{0x8e41ade9fbebc27d, 0x14588f13be847307}, // 1e162 * 2**-411 + {0xb1d219647ae6b31c, 0x596eb2d8ae258fc8}, // 1e163 * 2**-414 + {0xde469fbd99a05fe3, 0x6fca5f8ed9aef3bb}, // 1e164 * 2**-417 + {0x8aec23d680043bee, 0x25de7bb9480d5854}, // 1e165 * 2**-421 + {0xada72ccc20054ae9, 0xaf561aa79a10ae6a}, // 1e166 * 2**-424 + {0xd910f7ff28069da4, 0x1b2ba1518094da04}, // 1e167 * 2**-427 + {0x87aa9aff79042286, 0x90fb44d2f05d0842}, // 1e168 * 2**-431 + {0xa99541bf57452b28, 0x353a1607ac744a53}, // 1e169 * 2**-434 + {0xd3fa922f2d1675f2, 0x42889b8997915ce8}, // 1e170 * 2**-437 + {0x847c9b5d7c2e09b7, 0x69956135febada11}, // 1e171 * 2**-441 + {0xa59bc234db398c25, 0x43fab9837e699095}, // 1e172 * 2**-444 + {0xcf02b2c21207ef2e, 0x94f967e45e03f4bb}, // 1e173 * 2**-447 + {0x8161afb94b44f57d, 0x1d1be0eebac278f5}, // 1e174 * 2**-451 + {0xa1ba1ba79e1632dc, 0x6462d92a69731732}, // 1e175 * 2**-454 + {0xca28a291859bbf93, 0x7d7b8f7503cfdcfe}, // 1e176 * 2**-457 + {0xfcb2cb35e702af78, 0x5cda735244c3d43e}, // 1e177 * 2**-460 + {0x9defbf01b061adab, 0x3a0888136afa64a7}, // 1e178 * 2**-464 + {0xc56baec21c7a1916, 0x088aaa1845b8fdd0}, // 1e179 * 2**-467 + {0xf6c69a72a3989f5b, 0x8aad549e57273d45}, // 1e180 * 2**-470 + {0x9a3c2087a63f6399, 0x36ac54e2f678864b}, // 1e181 * 2**-474 + {0xc0cb28a98fcf3c7f, 0x84576a1bb416a7dd}, // 1e182 * 2**-477 + {0xf0fdf2d3f3c30b9f, 0x656d44a2a11c51d5}, // 1e183 * 2**-480 + {0x969eb7c47859e743, 0x9f644ae5a4b1b325}, // 1e184 * 2**-484 + {0xbc4665b596706114, 0x873d5d9f0dde1fee}, // 1e185 * 2**-487 + {0xeb57ff22fc0c7959, 0xa90cb506d155a7ea}, // 1e186 * 2**-490 + {0x9316ff75dd87cbd8, 0x09a7f12442d588f2}, // 1e187 * 2**-494 + {0xb7dcbf5354e9bece, 0x0c11ed6d538aeb2f}, // 1e188 * 2**-497 + {0xe5d3ef282a242e81, 0x8f1668c8a86da5fa}, // 1e189 * 2**-500 + {0x8fa475791a569d10, 0xf96e017d694487bc}, // 1e190 * 2**-504 + {0xb38d92d760ec4455, 0x37c981dcc395a9ac}, // 1e191 * 2**-507 + {0xe070f78d3927556a, 0x85bbe253f47b1417}, // 1e192 * 2**-510 + {0x8c469ab843b89562, 0x93956d7478ccec8e}, // 1e193 * 2**-514 + {0xaf58416654a6babb, 0x387ac8d1970027b2}, // 1e194 * 2**-517 + {0xdb2e51bfe9d0696a, 0x06997b05fcc0319e}, // 1e195 * 2**-520 + {0x88fcf317f22241e2, 0x441fece3bdf81f03}, // 1e196 * 2**-524 + {0xab3c2fddeeaad25a, 0xd527e81cad7626c3}, // 1e197 * 2**-527 + {0xd60b3bd56a5586f1, 0x8a71e223d8d3b074}, // 1e198 * 2**-530 + {0x85c7056562757456, 0xf6872d5667844e49}, // 1e199 * 2**-534 + {0xa738c6bebb12d16c, 0xb428f8ac016561db}, // 1e200 * 2**-537 + {0xd106f86e69d785c7, 0xe13336d701beba52}, // 1e201 * 2**-540 + {0x82a45b450226b39c, 0xecc0024661173473}, // 1e202 * 2**-544 + {0xa34d721642b06084, 0x27f002d7f95d0190}, // 1e203 * 2**-547 + {0xcc20ce9bd35c78a5, 0x31ec038df7b441f4}, // 1e204 * 2**-550 + {0xff290242c83396ce, 0x7e67047175a15271}, // 1e205 * 2**-553 + {0x9f79a169bd203e41, 0x0f0062c6e984d386}, // 1e206 * 2**-557 + {0xc75809c42c684dd1, 0x52c07b78a3e60868}, // 1e207 * 2**-560 + {0xf92e0c3537826145, 0xa7709a56ccdf8a82}, // 1e208 * 2**-563 + {0x9bbcc7a142b17ccb, 0x88a66076400bb691}, // 1e209 * 2**-567 + {0xc2abf989935ddbfe, 0x6acff893d00ea435}, // 1e210 * 2**-570 + {0xf356f7ebf83552fe, 0x0583f6b8c4124d43}, // 1e211 * 2**-573 + {0x98165af37b2153de, 0xc3727a337a8b704a}, // 1e212 * 2**-577 + {0xbe1bf1b059e9a8d6, 0x744f18c0592e4c5c}, // 1e213 * 2**-580 + {0xeda2ee1c7064130c, 0x1162def06f79df73}, // 1e214 * 2**-583 + {0x9485d4d1c63e8be7, 0x8addcb5645ac2ba8}, // 1e215 * 2**-587 + {0xb9a74a0637ce2ee1, 0x6d953e2bd7173692}, // 1e216 * 2**-590 + {0xe8111c87c5c1ba99, 0xc8fa8db6ccdd0437}, // 1e217 * 2**-593 + {0x910ab1d4db9914a0, 
0x1d9c9892400a22a2}, // 1e218 * 2**-597 + {0xb54d5e4a127f59c8, 0x2503beb6d00cab4b}, // 1e219 * 2**-600 + {0xe2a0b5dc971f303a, 0x2e44ae64840fd61d}, // 1e220 * 2**-603 + {0x8da471a9de737e24, 0x5ceaecfed289e5d2}, // 1e221 * 2**-607 + {0xb10d8e1456105dad, 0x7425a83e872c5f47}, // 1e222 * 2**-610 + {0xdd50f1996b947518, 0xd12f124e28f77719}, // 1e223 * 2**-613 + {0x8a5296ffe33cc92f, 0x82bd6b70d99aaa6f}, // 1e224 * 2**-617 + {0xace73cbfdc0bfb7b, 0x636cc64d1001550b}, // 1e225 * 2**-620 + {0xd8210befd30efa5a, 0x3c47f7e05401aa4e}, // 1e226 * 2**-623 + {0x8714a775e3e95c78, 0x65acfaec34810a71}, // 1e227 * 2**-627 + {0xa8d9d1535ce3b396, 0x7f1839a741a14d0d}, // 1e228 * 2**-630 + {0xd31045a8341ca07c, 0x1ede48111209a050}, // 1e229 * 2**-633 + {0x83ea2b892091e44d, 0x934aed0aab460432}, // 1e230 * 2**-637 + {0xa4e4b66b68b65d60, 0xf81da84d5617853f}, // 1e231 * 2**-640 + {0xce1de40642e3f4b9, 0x36251260ab9d668e}, // 1e232 * 2**-643 + {0x80d2ae83e9ce78f3, 0xc1d72b7c6b426019}, // 1e233 * 2**-647 + {0xa1075a24e4421730, 0xb24cf65b8612f81f}, // 1e234 * 2**-650 + {0xc94930ae1d529cfc, 0xdee033f26797b627}, // 1e235 * 2**-653 + {0xfb9b7cd9a4a7443c, 0x169840ef017da3b1}, // 1e236 * 2**-656 + {0x9d412e0806e88aa5, 0x8e1f289560ee864e}, // 1e237 * 2**-660 + {0xc491798a08a2ad4e, 0xf1a6f2bab92a27e2}, // 1e238 * 2**-663 + {0xf5b5d7ec8acb58a2, 0xae10af696774b1db}, // 1e239 * 2**-666 + {0x9991a6f3d6bf1765, 0xacca6da1e0a8ef29}, // 1e240 * 2**-670 + {0xbff610b0cc6edd3f, 0x17fd090a58d32af3}, // 1e241 * 2**-673 + {0xeff394dcff8a948e, 0xddfc4b4cef07f5b0}, // 1e242 * 2**-676 + {0x95f83d0a1fb69cd9, 0x4abdaf101564f98e}, // 1e243 * 2**-680 + {0xbb764c4ca7a4440f, 0x9d6d1ad41abe37f1}, // 1e244 * 2**-683 + {0xea53df5fd18d5513, 0x84c86189216dc5ed}, // 1e245 * 2**-686 + {0x92746b9be2f8552c, 0x32fd3cf5b4e49bb4}, // 1e246 * 2**-690 + {0xb7118682dbb66a77, 0x3fbc8c33221dc2a1}, // 1e247 * 2**-693 + {0xe4d5e82392a40515, 0x0fabaf3feaa5334a}, // 1e248 * 2**-696 + {0x8f05b1163ba6832d, 0x29cb4d87f2a7400e}, // 1e249 * 2**-700 + {0xb2c71d5bca9023f8, 0x743e20e9ef511012}, // 1e250 * 2**-703 + {0xdf78e4b2bd342cf6, 0x914da9246b255416}, // 1e251 * 2**-706 + {0x8bab8eefb6409c1a, 0x1ad089b6c2f7548e}, // 1e252 * 2**-710 + {0xae9672aba3d0c320, 0xa184ac2473b529b1}, // 1e253 * 2**-713 + {0xda3c0f568cc4f3e8, 0xc9e5d72d90a2741e}, // 1e254 * 2**-716 + {0x8865899617fb1871, 0x7e2fa67c7a658892}, // 1e255 * 2**-720 + {0xaa7eebfb9df9de8d, 0xddbb901b98feeab7}, // 1e256 * 2**-723 + {0xd51ea6fa85785631, 0x552a74227f3ea565}, // 1e257 * 2**-726 + {0x8533285c936b35de, 0xd53a88958f87275f}, // 1e258 * 2**-730 + {0xa67ff273b8460356, 0x8a892abaf368f137}, // 1e259 * 2**-733 + {0xd01fef10a657842c, 0x2d2b7569b0432d85}, // 1e260 * 2**-736 + {0x8213f56a67f6b29b, 0x9c3b29620e29fc73}, // 1e261 * 2**-740 + {0xa298f2c501f45f42, 0x8349f3ba91b47b8f}, // 1e262 * 2**-743 + {0xcb3f2f7642717713, 0x241c70a936219a73}, // 1e263 * 2**-746 + {0xfe0efb53d30dd4d7, 0xed238cd383aa0110}, // 1e264 * 2**-749 + {0x9ec95d1463e8a506, 0xf4363804324a40aa}, // 1e265 * 2**-753 + {0xc67bb4597ce2ce48, 0xb143c6053edcd0d5}, // 1e266 * 2**-756 + {0xf81aa16fdc1b81da, 0xdd94b7868e94050a}, // 1e267 * 2**-759 + {0x9b10a4e5e9913128, 0xca7cf2b4191c8326}, // 1e268 * 2**-763 + {0xc1d4ce1f63f57d72, 0xfd1c2f611f63a3f0}, // 1e269 * 2**-766 + {0xf24a01a73cf2dccf, 0xbc633b39673c8cec}, // 1e270 * 2**-769 + {0x976e41088617ca01, 0xd5be0503e085d813}, // 1e271 * 2**-773 + {0xbd49d14aa79dbc82, 0x4b2d8644d8a74e18}, // 1e272 * 2**-776 + {0xec9c459d51852ba2, 0xddf8e7d60ed1219e}, // 1e273 * 2**-779 + {0x93e1ab8252f33b45, 0xcabb90e5c942b503}, // 
1e274 * 2**-783 + {0xb8da1662e7b00a17, 0x3d6a751f3b936243}, // 1e275 * 2**-786 + {0xe7109bfba19c0c9d, 0x0cc512670a783ad4}, // 1e276 * 2**-789 + {0x906a617d450187e2, 0x27fb2b80668b24c5}, // 1e277 * 2**-793 + {0xb484f9dc9641e9da, 0xb1f9f660802dedf6}, // 1e278 * 2**-796 + {0xe1a63853bbd26451, 0x5e7873f8a0396973}, // 1e279 * 2**-799 + {0x8d07e33455637eb2, 0xdb0b487b6423e1e8}, // 1e280 * 2**-803 + {0xb049dc016abc5e5f, 0x91ce1a9a3d2cda62}, // 1e281 * 2**-806 + {0xdc5c5301c56b75f7, 0x7641a140cc7810fb}, // 1e282 * 2**-809 + {0x89b9b3e11b6329ba, 0xa9e904c87fcb0a9d}, // 1e283 * 2**-813 + {0xac2820d9623bf429, 0x546345fa9fbdcd44}, // 1e284 * 2**-816 + {0xd732290fbacaf133, 0xa97c177947ad4095}, // 1e285 * 2**-819 + {0x867f59a9d4bed6c0, 0x49ed8eabcccc485d}, // 1e286 * 2**-823 + {0xa81f301449ee8c70, 0x5c68f256bfff5a74}, // 1e287 * 2**-826 + {0xd226fc195c6a2f8c, 0x73832eec6fff3111}, // 1e288 * 2**-829 + {0x83585d8fd9c25db7, 0xc831fd53c5ff7eab}, // 1e289 * 2**-833 + {0xa42e74f3d032f525, 0xba3e7ca8b77f5e55}, // 1e290 * 2**-836 + {0xcd3a1230c43fb26f, 0x28ce1bd2e55f35eb}, // 1e291 * 2**-839 + {0x80444b5e7aa7cf85, 0x7980d163cf5b81b3}, // 1e292 * 2**-843 + {0xa0555e361951c366, 0xd7e105bcc332621f}, // 1e293 * 2**-846 + {0xc86ab5c39fa63440, 0x8dd9472bf3fefaa7}, // 1e294 * 2**-849 + {0xfa856334878fc150, 0xb14f98f6f0feb951}, // 1e295 * 2**-852 + {0x9c935e00d4b9d8d2, 0x6ed1bf9a569f33d3}, // 1e296 * 2**-856 + {0xc3b8358109e84f07, 0x0a862f80ec4700c8}, // 1e297 * 2**-859 + {0xf4a642e14c6262c8, 0xcd27bb612758c0fa}, // 1e298 * 2**-862 + {0x98e7e9cccfbd7dbd, 0x8038d51cb897789c}, // 1e299 * 2**-866 + {0xbf21e44003acdd2c, 0xe0470a63e6bd56c3}, // 1e300 * 2**-869 + {0xeeea5d5004981478, 0x1858ccfce06cac74}, // 1e301 * 2**-872 + {0x95527a5202df0ccb, 0x0f37801e0c43ebc8}, // 1e302 * 2**-876 + {0xbaa718e68396cffd, 0xd30560258f54e6ba}, // 1e303 * 2**-879 + {0xe950df20247c83fd, 0x47c6b82ef32a2069}, // 1e304 * 2**-882 + {0x91d28b7416cdd27e, 0x4cdc331d57fa5441}, // 1e305 * 2**-886 + {0xb6472e511c81471d, 0xe0133fe4adf8e952}, // 1e306 * 2**-889 + {0xe3d8f9e563a198e5, 0x58180fddd97723a6}, // 1e307 * 2**-892 + {0x8e679c2f5e44ff8f, 0x570f09eaa7ea7648}, // 1e308 * 2**-896 + {0xb201833b35d63f73, 0x2cd2cc6551e513da}, // 1e309 * 2**-899 + {0xde81e40a034bcf4f, 0xf8077f7ea65e58d1}, // 1e310 * 2**-902 + {0x8b112e86420f6191, 0xfb04afaf27faf782}, // 1e311 * 2**-906 + {0xadd57a27d29339f6, 0x79c5db9af1f9b563}, // 1e312 * 2**-909 + {0xd94ad8b1c7380874, 0x18375281ae7822bc}, // 1e313 * 2**-912 + {0x87cec76f1c830548, 0x8f2293910d0b15b5}, // 1e314 * 2**-916 + {0xa9c2794ae3a3c69a, 0xb2eb3875504ddb22}, // 1e315 * 2**-919 + {0xd433179d9c8cb841, 0x5fa60692a46151eb}, // 1e316 * 2**-922 + {0x849feec281d7f328, 0xdbc7c41ba6bcd333}, // 1e317 * 2**-926 + {0xa5c7ea73224deff3, 0x12b9b522906c0800}, // 1e318 * 2**-929 + {0xcf39e50feae16bef, 0xd768226b34870a00}, // 1e319 * 2**-932 + {0x81842f29f2cce375, 0xe6a1158300d46640}, // 1e320 * 2**-936 + {0xa1e53af46f801c53, 0x60495ae3c1097fd0}, // 1e321 * 2**-939 + {0xca5e89b18b602368, 0x385bb19cb14bdfc4}, // 1e322 * 2**-942 + {0xfcf62c1dee382c42, 0x46729e03dd9ed7b5}, // 1e323 * 2**-945 + {0x9e19db92b4e31ba9, 0x6c07a2c26a8346d1}, // 1e324 * 2**-949 + {0xc5a05277621be293, 0xc7098b7305241885}, // 1e325 * 2**-952 + {0xf70867153aa2db38, 0xb8cbee4fc66d1ea7}, // 1e326 * 2**-955 + {0x9a65406d44a5c903, 0x737f74f1dc043328}, // 1e327 * 2**-959 + {0xc0fe908895cf3b44, 0x505f522e53053ff2}, // 1e328 * 2**-962 + {0xf13e34aabb430a15, 0x647726b9e7c68fef}, // 1e329 * 2**-965 + {0x96c6e0eab509e64d, 0x5eca783430dc19f5}, // 1e330 * 2**-969 + 
{0xbc789925624c5fe0, 0xb67d16413d132072}, // 1e331 * 2**-972 + {0xeb96bf6ebadf77d8, 0xe41c5bd18c57e88f}, // 1e332 * 2**-975 + {0x933e37a534cbaae7, 0x8e91b962f7b6f159}, // 1e333 * 2**-979 + {0xb80dc58e81fe95a1, 0x723627bbb5a4adb0}, // 1e334 * 2**-982 + {0xe61136f2227e3b09, 0xcec3b1aaa30dd91c}, // 1e335 * 2**-985 + {0x8fcac257558ee4e6, 0x213a4f0aa5e8a7b1}, // 1e336 * 2**-989 + {0xb3bd72ed2af29e1f, 0xa988e2cd4f62d19d}, // 1e337 * 2**-992 + {0xe0accfa875af45a7, 0x93eb1b80a33b8605}, // 1e338 * 2**-995 + {0x8c6c01c9498d8b88, 0xbc72f130660533c3}, // 1e339 * 2**-999 + {0xaf87023b9bf0ee6a, 0xeb8fad7c7f8680b4}, // 1e340 * 2**-1002 + {0xdb68c2ca82ed2a05, 0xa67398db9f6820e1}, // 1e341 * 2**-1005 + {0x892179be91d43a43, 0x88083f8943a1148c}, // 1e342 * 2**-1009 + {0xab69d82e364948d4, 0x6a0a4f6b948959b0}, // 1e343 * 2**-1012 + {0xd6444e39c3db9b09, 0x848ce34679abb01c}, // 1e344 * 2**-1015 + {0x85eab0e41a6940e5, 0xf2d80e0c0c0b4e11}, // 1e345 * 2**-1019 + {0xa7655d1d2103911f, 0x6f8e118f0f0e2195}, // 1e346 * 2**-1022 + {0xd13eb46469447567, 0x4b7195f2d2d1a9fb}, // 1e347 * 2**-1025 +} diff --git a/src/strconv/testdata/testfp.txt b/src/internal/strconv/testdata/testfp.txt similarity index 100% rename from src/strconv/testdata/testfp.txt rename to src/internal/strconv/testdata/testfp.txt diff --git a/src/internal/synctest/synctest_test.go b/src/internal/synctest/synctest_test.go index 73a0a1c453e..d5ac1e5c1f8 100644 --- a/src/internal/synctest/synctest_test.go +++ b/src/internal/synctest/synctest_test.go @@ -5,6 +5,7 @@ package synctest_test import ( + "context" "fmt" "internal/synctest" "internal/testenv" @@ -329,6 +330,31 @@ func TestAfterFuncRunsImmediately(t *testing.T) { }) } +// TestTimerResetZeroDoNotHang verifies that using timer.Reset(0) does not +// cause the test to hang indefinitely. See https://go.dev/issue/76052. +func TestTimerResetZeroDoNotHang(t *testing.T) { + synctest.Run(func() { + timer := time.NewTimer(0) + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + for { + select { + case <-ctx.Done(): + return + case <-timer.C: + } + } + }() + + synctest.Wait() + timer.Reset(0) + synctest.Wait() + cancel() + synctest.Wait() + }) +} + func TestChannelFromOutsideBubble(t *testing.T) { choutside := make(chan struct{}) for _, test := range []struct { diff --git a/src/internal/syscall/windows/at_windows.go b/src/internal/syscall/windows/at_windows.go index 8dc3ac0e40f..b7ca8433c2a 100644 --- a/src/internal/syscall/windows/at_windows.go +++ b/src/internal/syscall/windows/at_windows.go @@ -131,6 +131,14 @@ func Openat(dirfd syscall.Handle, name string, flag uint64, perm uint32) (_ sysc if flag&syscall.O_TRUNC != 0 { err = syscall.Ftruncate(h, 0) + if err == ERROR_INVALID_PARAMETER { + // ERROR_INVALID_PARAMETER means truncation is not supported on this file handle. + // Unix's O_TRUNC specification says to ignore O_TRUNC on named pipes and terminal devices. + // We do the same here. 
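+			// (Per GetFileType, FILE_TYPE_PIPE also covers sockets and anonymous pipes,
+			// and FILE_TYPE_CHAR covers character devices such as consoles.)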
+			if t, err1 := syscall.GetFileType(h); err1 == nil && (t == syscall.FILE_TYPE_PIPE || t == syscall.FILE_TYPE_CHAR) {
+				err = nil
+			}
+		}
 		if err != nil {
 			syscall.CloseHandle(h)
 			return syscall.InvalidHandle, err
@@ -209,7 +217,7 @@ func Deleteat(dirfd syscall.Handle, name string, options uint32) error {
 	var h syscall.Handle
 	err := NtOpenFile(
 		&h,
-		SYNCHRONIZE|DELETE,
+		SYNCHRONIZE|FILE_READ_ATTRIBUTES|DELETE,
 		objAttrs,
 		&IO_STATUS_BLOCK{},
 		FILE_SHARE_DELETE|FILE_SHARE_READ|FILE_SHARE_WRITE,
@@ -220,14 +228,22 @@ func Deleteat(dirfd syscall.Handle, name string, options uint32) error {
 	}
 	defer syscall.CloseHandle(h)

-	const (
-		FileDispositionInformation   = 13
-		FileDispositionInformationEx = 64
-	)
+	if TestDeleteatFallback {
+		return deleteatFallback(h)
+	}
+
+	const FileDispositionInformationEx = 64

 	// First, attempt to delete the file using POSIX semantics
 	// (which permit a file to be deleted while it is still open).
 	// This matches the behavior of DeleteFileW.
+	//
+	// The following call uses features available on different Windows versions:
+	// - FILE_DISPOSITION_INFORMATION_EX: Windows 10, version 1607 (aka RS1)
+	// - FILE_DISPOSITION_POSIX_SEMANTICS: Windows 10, version 1607 (aka RS1)
+	// - FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE: Windows 10, version 1809 (aka RS5)
+	//
+	// Also, some file systems, like FAT32, don't support POSIX semantics.
 	err = NtSetInformationFile(
 		h,
 		&IO_STATUS_BLOCK{},
@@ -246,28 +262,57 @@ func Deleteat(dirfd syscall.Handle, name string, options uint32) error {
 	switch err {
 	case nil:
 		return nil
-	case STATUS_CANNOT_DELETE, STATUS_DIRECTORY_NOT_EMPTY:
+	case STATUS_INVALID_INFO_CLASS, // the operating system doesn't support FileDispositionInformationEx
+		STATUS_INVALID_PARAMETER, // the operating system doesn't support one of the flags
+		STATUS_NOT_SUPPORTED: // the file system doesn't support FILE_DISPOSITION_INFORMATION_EX or one of the flags
+		return deleteatFallback(h)
+	default:
 		return err.(NTStatus).Errno()
 	}
+}

-	// If the prior deletion failed, the filesystem either doesn't support
-	// POSIX semantics (for example, FAT), or hasn't implemented
-	// FILE_DISPOSITION_INFORMATION_EX.
-	//
-	// Try again.
-	err = NtSetInformationFile(
+// TestDeleteatFallback should only be used for testing purposes.
+// When set, [Deleteat] uses the fallback path unconditionally.
+var TestDeleteatFallback bool
+
+// deleteatFallback is a deleteat implementation that prioritizes
+// compatibility with older Windows versions and file systems
+// over performance.
+func deleteatFallback(h syscall.Handle) error {
+	var data syscall.ByHandleFileInformation
+	if err := syscall.GetFileInformationByHandle(h, &data); err == nil && data.FileAttributes&syscall.FILE_ATTRIBUTE_READONLY != 0 {
+		// Remove the read-only attribute. Reopen the file, as it was previously opened
+		// without FILE_WRITE_ATTRIBUTES access in order to maximize compatibility in the happy path.
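+		// The reopened handle needs only FILE_WRITE_ATTRIBUTES access to clear the read-only bit.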
+ wh, err := ReOpenFile(h, + FILE_WRITE_ATTRIBUTES, + FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE, + syscall.FILE_FLAG_OPEN_REPARSE_POINT|syscall.FILE_FLAG_BACKUP_SEMANTICS, + ) + if err != nil { + return err + } + err = SetFileInformationByHandle( + wh, + FileBasicInfo, + unsafe.Pointer(&FILE_BASIC_INFO{ + FileAttributes: data.FileAttributes &^ FILE_ATTRIBUTE_READONLY, + }), + uint32(unsafe.Sizeof(FILE_BASIC_INFO{})), + ) + syscall.CloseHandle(wh) + if err != nil { + return err + } + } + + return SetFileInformationByHandle( h, - &IO_STATUS_BLOCK{}, - unsafe.Pointer(&FILE_DISPOSITION_INFORMATION{ + FileDispositionInfo, + unsafe.Pointer(&FILE_DISPOSITION_INFO{ DeleteFile: true, }), - uint32(unsafe.Sizeof(FILE_DISPOSITION_INFORMATION{})), - FileDispositionInformation, + uint32(unsafe.Sizeof(FILE_DISPOSITION_INFO{})), ) - if st, ok := err.(NTStatus); ok { - return st.Errno() - } - return err } func Renameat(olddirfd syscall.Handle, oldpath string, newdirfd syscall.Handle, newpath string) error { diff --git a/src/internal/syscall/windows/symlink_windows.go b/src/internal/syscall/windows/symlink_windows.go index b91246037b5..b8249b3848e 100644 --- a/src/internal/syscall/windows/symlink_windows.go +++ b/src/internal/syscall/windows/symlink_windows.go @@ -19,6 +19,7 @@ const ( FileBasicInfo = 0 // FILE_BASIC_INFO FileStandardInfo = 1 // FILE_STANDARD_INFO FileNameInfo = 2 // FILE_NAME_INFO + FileDispositionInfo = 4 // FILE_DISPOSITION_INFO FileStreamInfo = 7 // FILE_STREAM_INFO FileCompressionInfo = 8 // FILE_COMPRESSION_INFO FileAttributeTagInfo = 9 // FILE_ATTRIBUTE_TAG_INFO diff --git a/src/internal/syscall/windows/syscall_windows.go b/src/internal/syscall/windows/syscall_windows.go index fb3b66540f1..b908a2c2519 100644 --- a/src/internal/syscall/windows/syscall_windows.go +++ b/src/internal/syscall/windows/syscall_windows.go @@ -531,6 +531,8 @@ const ( //sys GetOverlappedResult(handle syscall.Handle, overlapped *syscall.Overlapped, done *uint32, wait bool) (err error) //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW +//sys ReOpenFile(filehandle syscall.Handle, desiredAccess uint32, shareMode uint32, flagAndAttributes uint32) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] + // NTStatus corresponds with NTSTATUS, error values returned by ntdll.dll and // other native functions. 
type NTStatus uint32 @@ -556,6 +558,9 @@ const ( STATUS_NOT_A_DIRECTORY NTStatus = 0xC0000103 STATUS_CANNOT_DELETE NTStatus = 0xC0000121 STATUS_REPARSE_POINT_ENCOUNTERED NTStatus = 0xC000050B + STATUS_NOT_SUPPORTED NTStatus = 0xC00000BB + STATUS_INVALID_PARAMETER NTStatus = 0xC000000D + STATUS_INVALID_INFO_CLASS NTStatus = 0xC0000003 ) const ( diff --git a/src/internal/syscall/windows/types_windows.go b/src/internal/syscall/windows/types_windows.go index fe9e41f2f86..49daf9b31b1 100644 --- a/src/internal/syscall/windows/types_windows.go +++ b/src/internal/syscall/windows/types_windows.go @@ -216,6 +216,11 @@ const ( FILE_OPEN_FOR_FREE_SPACE_QUERY = 0x00800000 ) +// https://learn.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_disposition_info +type FILE_DISPOSITION_INFO struct { + DeleteFile bool +} + // https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/ntddk/ns-ntddk-_file_disposition_information type FILE_DISPOSITION_INFORMATION struct { DeleteFile bool diff --git a/src/internal/syscall/windows/zsyscall_windows.go b/src/internal/syscall/windows/zsyscall_windows.go index 70c4d76dff0..d087fd46f6b 100644 --- a/src/internal/syscall/windows/zsyscall_windows.go +++ b/src/internal/syscall/windows/zsyscall_windows.go @@ -86,6 +86,7 @@ var ( procModule32NextW = modkernel32.NewProc("Module32NextW") procMoveFileExW = modkernel32.NewProc("MoveFileExW") procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") + procReOpenFile = modkernel32.NewProc("ReOpenFile") procRtlLookupFunctionEntry = modkernel32.NewProc("RtlLookupFunctionEntry") procRtlVirtualUnwind = modkernel32.NewProc("RtlVirtualUnwind") procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") @@ -440,6 +441,15 @@ func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, return } +func ReOpenFile(filehandle syscall.Handle, desiredAccess uint32, shareMode uint32, flagAndAttributes uint32) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.SyscallN(procReOpenFile.Addr(), uintptr(filehandle), uintptr(desiredAccess), uintptr(shareMode), uintptr(flagAndAttributes)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = errnoErr(e1) + } + return +} + func RtlLookupFunctionEntry(pc uintptr, baseAddress *uintptr, table unsafe.Pointer) (ret *RUNTIME_FUNCTION) { r0, _, _ := syscall.SyscallN(procRtlLookupFunctionEntry.Addr(), uintptr(pc), uintptr(unsafe.Pointer(baseAddress)), uintptr(table)) ret = (*RUNTIME_FUNCTION)(unsafe.Pointer(r0)) diff --git a/src/internal/testenv/testenv_unix.go b/src/internal/testenv/testenv_unix.go index a629078842e..22eeca220da 100644 --- a/src/internal/testenv/testenv_unix.go +++ b/src/internal/testenv/testenv_unix.go @@ -21,8 +21,7 @@ func syscallIsNotSupported(err error) bool { return false } - var errno syscall.Errno - if errors.As(err, &errno) { + if errno, ok := errors.AsType[syscall.Errno](err); ok { switch errno { case syscall.EPERM, syscall.EROFS: // User lacks permission: either the call requires root permission and the diff --git a/src/internal/trace/event.go b/src/internal/trace/event.go index 321e4e21fb3..b78e5232946 100644 --- a/src/internal/trace/event.go +++ b/src/internal/trace/event.go @@ -197,7 +197,7 @@ type Range struct { Scope ResourceID } -// RangeAttributes provides attributes about a completed Range. +// RangeAttribute provides attributes about a completed Range. type RangeAttribute struct { // Name is the human-readable name for the range. 
Name string diff --git a/src/internal/trace/reader_test.go b/src/internal/trace/reader_test.go index 39ae77471e7..c03d0676a07 100644 --- a/src/internal/trace/reader_test.go +++ b/src/internal/trace/reader_test.go @@ -7,19 +7,14 @@ package trace_test import ( "bytes" "flag" - "fmt" "io" - "os" "path/filepath" "runtime" - "strings" "testing" "time" "internal/trace" - "internal/trace/raw" "internal/trace/testtrace" - "internal/trace/version" ) var ( @@ -33,7 +28,6 @@ func TestReaderGolden(t *testing.T) { t.Fatalf("failed to glob for tests: %v", err) } for _, testPath := range matches { - testPath := testPath testName, err := filepath.Rel("./testdata", testPath) if err != nil { t.Fatalf("failed to relativize testdata path: %v", err) @@ -132,52 +126,6 @@ func testReader(t *testing.T, tr io.Reader, v *testtrace.Validator, exp *testtra } } -func dumpTraceToText(t *testing.T, b []byte) string { - t.Helper() - - br, err := raw.NewReader(bytes.NewReader(b)) - if err != nil { - t.Fatalf("dumping trace: %v", err) - } - var sb strings.Builder - tw, err := raw.NewTextWriter(&sb, version.Current) - if err != nil { - t.Fatalf("dumping trace: %v", err) - } - for { - ev, err := br.ReadEvent() - if err == io.EOF { - break - } - if err != nil { - t.Fatalf("dumping trace: %v", err) - } - if err := tw.WriteEvent(ev); err != nil { - t.Fatalf("dumping trace: %v", err) - } - } - return sb.String() -} - -func dumpTraceToFile(t *testing.T, testName string, stress bool, b []byte) string { - t.Helper() - - desc := "default" - if stress { - desc = "stress" - } - name := fmt.Sprintf("%s.%s.trace.", testName, desc) - f, err := os.CreateTemp("", name) - if err != nil { - t.Fatalf("creating temp file: %v", err) - } - defer f.Close() - if _, err := io.Copy(f, bytes.NewReader(b)); err != nil { - t.Fatalf("writing trace dump to %q: %v", f.Name(), err) - } - return f.Name() -} - func TestTraceGenSync(t *testing.T) { type sync struct { Time trace.Time diff --git a/src/internal/trace/testtrace/helpers.go b/src/internal/trace/testtrace/helpers.go new file mode 100644 index 00000000000..8a64d5c2ee1 --- /dev/null +++ b/src/internal/trace/testtrace/helpers.go @@ -0,0 +1,87 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testtrace + +import ( + "bytes" + "fmt" + "internal/testenv" + "internal/trace/raw" + "internal/trace/version" + "io" + "os" + "strings" + "testing" +) + +// Dump saves the trace to a file or the test log. +func Dump(t *testing.T, testName string, traceBytes []byte, forceToFile bool) { + onBuilder := testenv.Builder() != "" + onOldBuilder := !strings.Contains(testenv.Builder(), "gotip") && !strings.Contains(testenv.Builder(), "go1") + + if onBuilder && !forceToFile { + // Dump directly to the test log on the builder, since this + // data is critical for debugging and this is the only way + // we can currently make sure it's retained. + s := dumpTraceToText(t, traceBytes) + if onOldBuilder && len(s) > 1<<20+512<<10 { + // The old build infrastructure truncates logs at ~2 MiB. + // Let's assume we're the only failure and give ourselves + // up to 1.5 MiB to dump the trace. + // + // TODO(mknyszek): Remove this when we've migrated off of + // the old infrastructure. 
+ t.Logf("text trace too large to dump (%d bytes)", len(s)) + } else { + t.Log(s) + t.Log("Convert this to a raw trace with `go test internal/trace/testtrace -convert in.tracetxt -out out.trace`") + } + } else { + // We asked to dump the trace or failed. Write the trace to a file. + t.Logf("wrote trace to file: %s", dumpTraceToFile(t, testName, traceBytes)) + } +} + +func dumpTraceToText(t *testing.T, b []byte) string { + t.Helper() + + br, err := raw.NewReader(bytes.NewReader(b)) + if err != nil { + t.Fatalf("dumping trace: %v", err) + } + var sb strings.Builder + tw, err := raw.NewTextWriter(&sb, version.Current) + if err != nil { + t.Fatalf("dumping trace: %v", err) + } + for { + ev, err := br.ReadEvent() + if err == io.EOF { + break + } + if err != nil { + t.Fatalf("dumping trace: %v", err) + } + if err := tw.WriteEvent(ev); err != nil { + t.Fatalf("dumping trace: %v", err) + } + } + return sb.String() +} + +func dumpTraceToFile(t *testing.T, testName string, b []byte) string { + t.Helper() + + name := fmt.Sprintf("%s.trace.", testName) + f, err := os.CreateTemp(t.ArtifactDir(), name) + if err != nil { + t.Fatalf("creating temp file: %v", err) + } + defer f.Close() + if _, err := io.Copy(f, bytes.NewReader(b)); err != nil { + t.Fatalf("writing trace dump to %q: %v", f.Name(), err) + } + return f.Name() +} diff --git a/src/internal/trace/testtrace/helpers_test.go b/src/internal/trace/testtrace/helpers_test.go new file mode 100644 index 00000000000..526f8d6a8ba --- /dev/null +++ b/src/internal/trace/testtrace/helpers_test.go @@ -0,0 +1,79 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testtrace + +import ( + "flag" + "fmt" + "io" + "internal/trace/raw" + "os" + "testing" +) + +var ( + convert = flag.String("convert", "", "Path to trace text file to convert to binary format") + output = flag.String("out", "", "Output path for converted trace") +) + +// TestConvertDump is not actually a test, it is a tool for converting trace +// text dumps generated by Dump into the binary trace format. Set -convert and +// -o to perform a converison. +// +// go test internal/trace/testtrace -convert in.tracetxt -out out.trace +// +// This would be cleaner as a dedicated internal command rather than a test, +// but cmd/dist does not handle internal (non-distributed) commands in std +// well. 
+func TestConvertDump(t *testing.T) { + if *convert == "" { + t.Skip("Set -convert to convert a trace text file") + } + if *output == "" { + t.Fatal("Set -out to specify conversion output") + } + + if err := convertDump(*convert, *output); err != nil { + t.Error(err) + } +} + +func convertDump(inPath, outPath string) error { + in, err := os.Open(inPath) + if err != nil { + return fmt.Errorf("error opening input: %v", err) + } + defer in.Close() + + out, err := os.Create(outPath) + if err != nil { + return fmt.Errorf("error creating output: %v", err) + } + defer out.Close() + + tr, err := raw.NewTextReader(in) + if err != nil { + return fmt.Errorf("error creating text reader: %v", err) + } + tw, err := raw.NewWriter(out, tr.Version()) + if err != nil { + return fmt.Errorf("error creating raw writer: %v", err) + } + + for { + ev, err := tr.ReadEvent() + if err == io.EOF { + break + } + if err != nil { + return fmt.Errorf("bad trace file: %v", err) + } + if err := tw.WriteEvent(ev); err != nil { + return fmt.Errorf("failed to write trace bytes: %v", err) + } + } + + return nil +} diff --git a/src/internal/trace/trace_test.go b/src/internal/trace/trace_test.go index 4824937b700..479411548f2 100644 --- a/src/internal/trace/trace_test.go +++ b/src/internal/trace/trace_test.go @@ -448,6 +448,9 @@ func TestTraceStacks(t *testing.T) { {"main.main.func11", 0}, }}, }...) + if runtime.GOOS == "darwin" { + want[len(want)-1].frames = append([]frame{{"syscall.syscall", 0}}, want[len(want)-1].frames...) + } } stackMatches := func(stk trace.Stack, frames []frame) bool { for i, f := range slices.Collect(stk.Frames()) { @@ -668,26 +671,14 @@ func testTraceProg(t *testing.T, progName string, extra func(t *testing.T, trace } // Dump some more information on failure. - if t.Failed() && onBuilder { - // Dump directly to the test log on the builder, since this - // data is critical for debugging and this is the only way - // we can currently make sure it's retained. - t.Log("found bad trace; dumping to test log...") - s := dumpTraceToText(t, tb) - if onOldBuilder && len(s) > 1<<20+512<<10 { - // The old build infrastructure truncates logs at ~2 MiB. - // Let's assume we're the only failure and give ourselves - // up to 1.5 MiB to dump the trace. - // - // TODO(mknyszek): Remove this when we've migrated off of - // the old infrastructure. - t.Logf("text trace too large to dump (%d bytes)", len(s)) - } else { - t.Log(s) + if t.Failed() || *dumpTraces { + suffix := func(stress bool) string { + if stress { + return "stress" + } + return "default" } - } else if t.Failed() || *dumpTraces { - // We asked to dump the trace or failed. Write the trace to a file. 
- t.Logf("wrote trace to file: %s", dumpTraceToFile(t, testName, stress, tb)) + testtrace.Dump(t, fmt.Sprintf("%s.%s", testName, suffix(stress)), tb, *dumpTraces) } } t.Run("Default", func(t *testing.T) { diff --git a/src/internal/trace/tracev1_test.go b/src/internal/trace/tracev1_test.go index 42a83718877..355a6ff529f 100644 --- a/src/internal/trace/tracev1_test.go +++ b/src/internal/trace/tracev1_test.go @@ -21,7 +21,6 @@ func TestTraceV1(t *testing.T) { } var testedUserRegions bool for _, p := range traces { - p := p testName, err := filepath.Rel("./internal/tracev1/testdata", p) if err != nil { t.Fatalf("failed to relativize testdata path: %s", err) diff --git a/src/internal/types/testdata/check/builtins0.go b/src/internal/types/testdata/check/builtins0.go index e326b92ac7b..9b99a890acf 100644 --- a/src/internal/types/testdata/check/builtins0.go +++ b/src/internal/types/testdata/check/builtins0.go @@ -609,36 +609,38 @@ func min2() { ) } -func newInvalid() { - f2 := func() (x, y int) { return } +func new1() { _ = new() // ERROR "not enough arguments" _ = new(1, 2) // ERROR "too many arguments" + _ = new(unsafe /* ERROR "use of package unsafe not in selector" */ ) + + _ = new(struct{ x, y int }) + p := new(float64) + q := new(*float64) + _ = *p == **q + + type G[P any] struct{} + _ = new(G[int]) + _ = new(G /* ERROR "cannot use generic type G without instantiation" */ ) + new /* ERROR "not used" */ (int) _ = &new /* ERROR "cannot take address" */ (int) _ = new(int... /* ERROR "invalid use of ..." */) _ = new(f0 /* ERROR "f0() (no value) used as value or type" */ ()) _ = new(len /* ERROR "len (built-in) must be called" */) _ = new(1 /* ERROR "argument to new (overflows)" */ << 70) - _ = new(f2 /* ERRORx "multiple-value.*in single-value context" */ ()) } -// new(T) -func newType() { - _ = new(struct{ x, y int }) - - p := new(float64) - q := new(*float64) - _ = *p == **q -} - -// new(expr), added in go1.26 -func newExpr() { - f1 := func() (x []int) { return } +func new2() { + // new(expr), added in go1.26 + f1 := func() []int { panic(0) } + f2 := func() (int, int) { panic(0) } var ( _ *[]int = new(f1()) _ *func() []int = new(f1) _ *bool = new(false) + _ *bool = new(1 < 2) _ *int = new(123) _ *float64 = new(1.0) _ *uint = new(uint(3)) @@ -647,6 +649,14 @@ func newExpr() { _ *struct{} = new(struct{}{}) _ *any = new(any) + _ = new(f2 /* ERRORx "multiple-value.*in single-value context" */ ()) + _ = new(1 << /* ERROR "constant shift overflow" */ 1000) + _ = new(1e10000 /* ERROR "cannot use 1e10000 (untyped float constant 1e+10000) as float64 value in argument to new (overflows)" */ ) + _ = new(nil /* ERROR "use of untyped nil in argument to new" */ ) + _ = new(comparable /* ERROR "cannot use type comparable outside a type constraint" */ ) + _ = new(new /* ERROR "new (built-in) must be called" */ ) + _ = new(panic /* ERROR "panic(0) (no value) used as value or type" */ (0)) + // from issue 43125 _ = new(-1) _ = new(1 + 1) diff --git a/src/internal/types/testdata/check/go1_25.go b/src/internal/types/testdata/check/go1_25.go index b2ace833439..3799bc02b46 100644 --- a/src/internal/types/testdata/check/go1_25.go +++ b/src/internal/types/testdata/check/go1_25.go @@ -10,4 +10,9 @@ package p -var _ = new /* ERROR "new(expr) requires go1.26 or later" */ (123) +func f(x int) { + _ = new /* ERROR "new(123) requires go1.26 or later" */ (123) + _ = new /* ERROR "new(x) requires go1.26 or later" */ (x) + _ = new /* ERROR "new(f) requires go1.26 or later" */ (f) + _ = new /* ERROR "new(1 < 2) requires go1.26 
or later" */ (1 < 2) +} diff --git a/src/internal/types/testdata/fixedbugs/issue45550.go b/src/internal/types/testdata/fixedbugs/issue45550.go index 2ea4ffe3079..32fdde6740c 100644 --- a/src/internal/types/testdata/fixedbugs/issue45550.go +++ b/src/internal/types/testdata/fixedbugs/issue45550.go @@ -4,7 +4,7 @@ package p -type Builder /* ERROR "invalid recursive type" */ [T interface{ struct{ Builder[T] } }] struct{} +type Builder[T ~struct{ Builder[T] }] struct{} type myBuilder struct { Builder[myBuilder] } diff --git a/src/internal/types/testdata/fixedbugs/issue46461.go b/src/internal/types/testdata/fixedbugs/issue46461.go index e823013f995..454f7e83653 100644 --- a/src/internal/types/testdata/fixedbugs/issue46461.go +++ b/src/internal/types/testdata/fixedbugs/issue46461.go @@ -7,16 +7,16 @@ package p // test case 1 -type T /* ERROR "invalid recursive type" */ [U interface{ M() T[U] }] int +type T[U interface{ M() T[U] }] int type X int func (X) M() T[X] { return 0 } // test case 2 -type A /* ERROR "invalid recursive type" */ [T interface{ A[T] }] interface{} +type A[T interface{ A[T] }] interface{} // test case 3 -type A2 /* ERROR "invalid recursive type" */ [U interface{ A2[U] }] interface{ M() A2[U] } +type A2[U interface{ A2[U] }] interface{ M() A2[U] } type I interface{ A2[I]; M() A2[I] } diff --git a/src/internal/types/testdata/fixedbugs/issue46461a.go b/src/internal/types/testdata/fixedbugs/issue46461a.go index e4b8e1a240a..74ed6c48827 100644 --- a/src/internal/types/testdata/fixedbugs/issue46461a.go +++ b/src/internal/types/testdata/fixedbugs/issue46461a.go @@ -7,17 +7,16 @@ package p // test case 1 -type T /* ERROR "invalid recursive type" */ [U interface{ M() T[U] }] int +type T[U interface{ M() T[U] }] int type X int func (X) M() T[X] { return 0 } // test case 2 -type A /* ERROR "invalid recursive type" */ [T interface{ A[T] }] interface{} +type A[T interface{ A[T] }] interface{} // test case 3 -// TODO(gri) should report error only once -type A2 /* ERROR "invalid recursive type" */ /* ERROR "invalid recursive type" */ [U interface{ A2[U] }] interface{ M() A2[U] } +type A2[U interface{ A2[U] }] interface{ M() A2[U] } type I interface{ A2[I]; M() A2[I] } diff --git a/src/internal/types/testdata/fixedbugs/issue47796.go b/src/internal/types/testdata/fixedbugs/issue47796.go index 7f719ff6745..b07cdddabab 100644 --- a/src/internal/types/testdata/fixedbugs/issue47796.go +++ b/src/internal/types/testdata/fixedbugs/issue47796.go @@ -6,16 +6,16 @@ package p // parameterized types with self-recursive constraints type ( - T1 /* ERROR "invalid recursive type" */ [P T1[P]] interface{} - T2 /* ERROR "invalid recursive type" */ [P, Q T2[P, Q]] interface{} + T1[P T1[P]] interface{} + T2[P, Q T2[P, Q]] interface{} T3[P T2[P, Q], Q interface{ ~string }] interface{} - T4a /* ERROR "invalid recursive type" */ [P T4a[P]] interface{ ~int } - T4b /* ERROR "invalid recursive type" */ [P T4b[int]] interface{ ~int } - T4c /* ERROR "invalid recursive type" */ [P T4c[string]] interface{ ~int } + T4a[P T4a[P]] interface{ ~int } + T4b[P T4b[int]] interface{ ~int } + T4c[P T4c[string /* ERROR "string does not satisfy T4c[string]" */]] interface{ ~int } // mutually recursive constraints - T5 /* ERROR "invalid recursive type" */ [P T6[P]] interface{ int } + T5[P T6[P]] interface{ int } T6[P T5[P]] interface{ int } ) @@ -28,6 +28,6 @@ var ( // test case from issue -type Eq /* ERROR "invalid recursive type" */ [a Eq[a]] interface { +type Eq[a Eq[a]] interface { Equal(that a) bool } diff --git 
a/src/internal/types/testdata/fixedbugs/issue48529.go b/src/internal/types/testdata/fixedbugs/issue48529.go index bcc5e3536d3..eca1da89232 100644 --- a/src/internal/types/testdata/fixedbugs/issue48529.go +++ b/src/internal/types/testdata/fixedbugs/issue48529.go @@ -4,7 +4,7 @@ package p -type T /* ERROR "invalid recursive type" */ [U interface{ M() T[U, int] }] int +type T[U interface{ M() T /* ERROR "too many type arguments for type T" */ [U, int] }] int type X int diff --git a/src/internal/types/testdata/fixedbugs/issue49439.go b/src/internal/types/testdata/fixedbugs/issue49439.go index 3852f160948..63bedf61911 100644 --- a/src/internal/types/testdata/fixedbugs/issue49439.go +++ b/src/internal/types/testdata/fixedbugs/issue49439.go @@ -6,21 +6,21 @@ package p import "unsafe" -type T0 /* ERROR "invalid recursive type" */ [P T0[P]] struct{} +type T0[P T0[P]] struct{} -type T1 /* ERROR "invalid recursive type" */ [P T2[P]] struct{} -type T2[P T1[P]] struct{} +type T1[P T2[P /* ERROR "P does not satisfy T1[P]" */]] struct{} +type T2[P T1[P /* ERROR "P does not satisfy T2[P]" */]] struct{} -type T3 /* ERROR "invalid recursive type" */ [P interface{ ~struct{ f T3[int] } }] struct{} +type T3[P interface{ ~struct{ f T3[int /* ERROR "int does not satisfy" */ ] } }] struct{} // valid cycle in M type N[P M[P]] struct{} -type M[Q any] struct { F *M[Q] } +type M[Q any] struct{ F *M[Q] } // "crazy" case type TC[P [unsafe.Sizeof(func() { - type T [P [unsafe.Sizeof(func(){})]byte] struct{} + type T[P [unsafe.Sizeof(func() {})]byte] struct{} })]byte] struct{} // test case from issue -type X /* ERROR "invalid recursive type" */ [T any, PT X[T]] interface{} +type X[T any, PT X /* ERROR "not enough type arguments for type X" */ [T]] interface{} diff --git a/src/internal/types/testdata/fixedbugs/issue68162.go b/src/internal/types/testdata/fixedbugs/issue68162.go new file mode 100644 index 00000000000..8efd8a66dff --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue68162.go @@ -0,0 +1,24 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +type N[B N[B]] interface { + Add(B) B +} + +func Add[P N[P]](x, y P) P { + return x.Add(y) +} + +type MyInt int + +func (x MyInt) Add(y MyInt) MyInt { + return x + y +} + +func main() { + var x, y MyInt = 2, 3 + println(Add(x, y)) +} diff --git a/src/internal/types/testdata/fixedbugs/issue75194.go b/src/internal/types/testdata/fixedbugs/issue75194.go new file mode 100644 index 00000000000..ec2f9249ec9 --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue75194.go @@ -0,0 +1,14 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +type A /* ERROR "invalid recursive type: A refers to itself" */ struct { + a A +} + +type B /* ERROR "invalid recursive type: B refers to itself" */ struct { + a A + b B +} diff --git a/src/internal/types/testdata/fixedbugs/issue75883.go b/src/internal/types/testdata/fixedbugs/issue75883.go new file mode 100644 index 00000000000..33b23505f43 --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue75883.go @@ -0,0 +1,20 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
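(Aside, before the issue75883 test cases below: these testdata updates track a language change under which a type parameter's constraint may now refer to the generic type being declared. A minimal sketch in the spirit of issue68162 above, with hypothetical names:)

type Number[T Number[T]] interface{ Add(T) T }

func Sum[T Number[T]](xs []T) (s T) {
	for _, x := range xs {
		s = s.Add(x) // Add comes from the self-referential constraint
	}
	return s
}

With the MyInt type from issue68162.go, Sum([]MyInt{2, 3}) instantiates cleanly, where the constraint declaration itself previously reported "invalid recursive type".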
+ +// Test cases that were invalid because of cycles before the respective language change. +// Some are still invalid, but not because of cycles. + +package p + +type T1[P T1[P]] struct{} +type T2[P interface { + T2[int /* ERROR "int does not satisfy interface{T2[int]}" */] +}] struct{} +type T3[P interface { + m(T3[int /* ERROR "int does not satisfy interface{m(T3[int])}" */]) +}] struct{} +type T4[P T5[P /* ERROR "P does not satisfy T4[P]" */]] struct{} +type T5[P T4[P /* ERROR "P does not satisfy T5[P]" */]] struct{} + +type T6[P int] struct{ f *T6[P] } diff --git a/src/internal/types/testdata/fixedbugs/issue75986.go b/src/internal/types/testdata/fixedbugs/issue75986.go new file mode 100644 index 00000000000..b2b1509e03b --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue75986.go @@ -0,0 +1,28 @@ +// -lang=go1.25 + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +import "strings" + +type T int +type G[P any] struct{} + +var x T + +// Verify that we don't get a version error when there's another error present in new(expr). + +func f() { + _ = new(U /* ERROR "undefined: U" */) + _ = new(strings.BUILDER /* ERROR "undefined: strings.BUILDER (but have Builder)" */) + _ = new(T) // ok + _ = new(G[int]) // ok + _ = new(G /* ERROR "cannot use generic type G without instantiation" */) + _ = new(nil /* ERROR "use of untyped nil in argument to new" */) + _ = new(comparable /* ERROR "cannot use type comparable outside a type constraint" */) + _ = new(new /* ERROR "new (built-in) must be called" */) + _ = new(panic /* ERROR "panic(0) (no value) used as value or type" */ (0)) +} diff --git a/src/internal/types/testdata/fixedbugs/issue76103.go b/src/internal/types/testdata/fixedbugs/issue76103.go new file mode 100644 index 00000000000..6ba0d3c6776 --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue76103.go @@ -0,0 +1,29 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +func _() { + f(foo /* ERROR "undefined: foo" */) // ERROR "not enough arguments in call to f\n\thave (unknown type)\n\twant (int, int)" +} + +func f(_, _ int) {} + +// test case from issue + +type S struct{} + +func (S) G() {} + +func main() { + var s S + _ = must(s.F /* ERROR "s.F undefined" */ ()) // ERROR "not enough arguments in call to must\n\thave (unknown type)\n\twant (T, error)" +} + +func must[T any](x T, err error) T { + if err != nil { + panic(err) + } + return x +} diff --git a/src/internal/types/testdata/fixedbugs/issue76220.go b/src/internal/types/testdata/fixedbugs/issue76220.go new file mode 100644 index 00000000000..ad465010a05 --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue76220.go @@ -0,0 +1,17 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +func _() { + append(nil /* ERROR "argument must be a slice; have untyped nil" */, ""...) +} + +// test case from issue + +func main() { + s := "hello" + msg := append(nil /* ERROR "argument must be a slice; have untyped nil" */, s...) 
+ print(msg) +} diff --git a/src/internal/zstd/fse_test.go b/src/internal/zstd/fse_test.go index 6f106b65b77..20365745a54 100644 --- a/src/internal/zstd/fse_test.go +++ b/src/internal/zstd/fse_test.go @@ -68,7 +68,6 @@ func TestPredefinedTables(t *testing.T) { }, } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { var r Reader table := make([]fseEntry, 1<<…) … diff --git a/src/math/bits/bits.go b/src/math/bits/bits.go --- a/src/math/bits/bits.go +++ b/src/math/bits/bits.go @@ … @@ func Len16(x uint16) (n int) { if x >= 1<<8 { x >>= 8 n = 8 } - return n + int(len8tab[x]) + return n + int(len8tab[uint8(x)]) } // Len32 returns the minimum number of bits required to represent x; the result is 0 for x == 0. @@ -330,7 +330,7 @@ func Len32(x uint32) (n int) { x >>= 8 n += 8 } - return n + int(len8tab[x]) + return n + int(len8tab[uint8(x)]) } // Len64 returns the minimum number of bits required to represent x; the result is 0 for x == 0. @@ -347,7 +347,7 @@ func Len64(x uint64) (n int) { x >>= 8 n += 8 } - return n + int(len8tab[x]) + return n + int(len8tab[uint8(x)]) } // --- Add with carry --- diff --git a/src/math/rand/default_test.go b/src/math/rand/default_test.go index 0ba51b4dbd4..25c24244c46 100644 --- a/src/math/rand/default_test.go +++ b/src/math/rand/default_test.go @@ -34,7 +34,6 @@ func TestDefaultRace(t *testing.T) { t.Parallel() for i := 0; i < 6; i++ { - i := i t.Run(strconv.Itoa(i), func(t *testing.T) { t.Parallel() cmd := testenv.Command(t, testenv.Executable(t), "-test.run=^TestDefaultRace$") diff --git a/src/mime/grammar.go b/src/mime/grammar.go index cc578fbcfd4..1efd8a16dec 100644 --- a/src/mime/grammar.go +++ b/src/mime/grammar.go @@ -62,7 +62,9 @@ func isTokenChar(c byte) bool { 1<<'^' | 1<<'_' | 1<<'`' | + 1<<'{' | 1<<'|' | + 1<<'}' | 1<<'~' return ((uint64(1)<<c)&(mask&(1<<64-1)) | (uint64(1)<<(c-64))&(mask>>64)) != 0 diff --git a/src/mime/mediatype_test.go b/src/mime/mediatype_test.go index 251df8d6691..da8d64de7a3 100644 --- a/src/mime/mediatype_test.go +++ b/src/mime/mediatype_test.go @@ -413,6 +413,9 @@ func init() { // Issue #48866: duplicate parameters containing equal values should be allowed {`text; charset=utf-8; charset=utf-8; format=fixed`, "text", m("charset", "utf-8", "format", "fixed")}, {`text; charset=utf-8; format=flowed; charset=utf-8`, "text", m("charset", "utf-8", "format", "flowed")}, + + // Issue #76236: '{' and '}' are token chars. 
+ {"attachment; filename={file}.png", "attachment", m("filename", "{file}.png")}, } } diff --git a/src/net/cgo_unix_syscall.go b/src/net/cgo_unix_syscall.go index 9cfc5783721..6b7ef74bd2b 100644 --- a/src/net/cgo_unix_syscall.go +++ b/src/net/cgo_unix_syscall.go @@ -84,8 +84,7 @@ func _C_res_nclose(state *_C_struct___res_state) { } func cgoNameinfoPTR(b []byte, sa *syscall.RawSockaddr, salen int) (int, error) { - gerrno, err := unix.Getnameinfo(sa, salen, &b[0], len(b), nil, 0, unix.NI_NAMEREQD) - return int(gerrno), err + return unix.Getnameinfo(sa, salen, &b[0], len(b), nil, 0, unix.NI_NAMEREQD) } func cgoSockaddrInet4(ip IP) *syscall.RawSockaddr { diff --git a/src/net/conn_test.go b/src/net/conn_test.go index d1e1e7bf1cd..87097e10ee3 100644 --- a/src/net/conn_test.go +++ b/src/net/conn_test.go @@ -19,7 +19,6 @@ const someTimeout = 1 * time.Hour func TestConnAndListener(t *testing.T) { for i, network := range []string{"tcp", "unix", "unixpacket"} { - i, network := i, network t.Run(network, func(t *testing.T) { if !testableNetwork(network) { t.Skipf("skipping %s test", network) diff --git a/src/net/dial_test.go b/src/net/dial_test.go index 829b80c33a1..07a9b46ddb6 100644 --- a/src/net/dial_test.go +++ b/src/net/dial_test.go @@ -232,7 +232,6 @@ func TestDialParallel(t *testing.T) { } for i, tt := range testCases { - i, tt := i, tt t.Run(fmt.Sprint(i), func(t *testing.T) { dialTCP := func(ctx context.Context, network string, laddr, raddr *TCPAddr) (*TCPConn, error) { n := "tcp6" diff --git a/src/net/dnsclient.go b/src/net/dnsclient.go index 5f135cc2117..eb509d175fc 100644 --- a/src/net/dnsclient.go +++ b/src/net/dnsclient.go @@ -7,7 +7,7 @@ package net import ( "cmp" "internal/bytealg" - "internal/itoa" + "internal/strconv" "slices" _ "unsafe" // for go:linkname @@ -36,7 +36,7 @@ func reverseaddr(addr string) (arpa string, err error) { return "", &DNSError{Err: "unrecognized address", Name: addr} } if ip.To4() != nil { - return itoa.Uitoa(uint(ip[15])) + "." + itoa.Uitoa(uint(ip[14])) + "." + itoa.Uitoa(uint(ip[13])) + "." + itoa.Uitoa(uint(ip[12])) + ".in-addr.arpa.", nil + return strconv.Itoa(int(ip[15])) + "." + strconv.Itoa(int(ip[14])) + "." + strconv.Itoa(int(ip[13])) + "." + strconv.Itoa(int(ip[12])) + ".in-addr.arpa.", nil } // Must be IPv6 buf := make([]byte, 0, len(ip)*4+len("ip6.arpa.")) diff --git a/src/net/dnsclient_unix.go b/src/net/dnsclient_unix.go index 5e060a6b489..40f76062944 100644 --- a/src/net/dnsclient_unix.go +++ b/src/net/dnsclient_unix.go @@ -17,7 +17,7 @@ import ( "errors" "internal/bytealg" "internal/godebug" - "internal/itoa" + "internal/strconv" "internal/stringslite" "io" "os" @@ -559,7 +559,7 @@ func (o hostLookupOrder) String() string { if s, ok := lookupOrderName[o]; ok { return s } - return "hostLookupOrder=" + itoa.Itoa(int(o)) + "??" + return "hostLookupOrder=" + strconv.Itoa(int(o)) + "??" 
} func (r *Resolver) goLookupHostOrder(ctx context.Context, name string, order hostLookupOrder, conf *dnsConfig) (addrs []string, err error) { @@ -842,8 +842,7 @@ func (r *Resolver) goLookupPTR(ctx context.Context, addr string, order hostLooku } p, server, err := r.lookup(ctx, arpa, dnsmessage.TypePTR, conf) if err != nil { - var dnsErr *DNSError - if errors.As(err, &dnsErr) && dnsErr.IsNotFound { + if dnsErr, ok := errors.AsType[*DNSError](err); ok && dnsErr.IsNotFound { if order == hostLookupDNSFiles { names := lookupStaticAddr(addr) if len(names) > 0 { diff --git a/src/net/dnsclient_unix_test.go b/src/net/dnsclient_unix_test.go index 826b4daba1e..fc1d40f18b6 100644 --- a/src/net/dnsclient_unix_test.go +++ b/src/net/dnsclient_unix_test.go @@ -2627,8 +2627,7 @@ func TestLongDNSNames(t *testing.T) { } expectedErr := DNSError{Err: errNoSuchHost.Error(), Name: v.req, IsNotFound: true} - var dnsErr *DNSError - errors.As(err, &dnsErr) + dnsErr, _ := errors.AsType[*DNSError](err) if dnsErr == nil || *dnsErr != expectedErr { t.Errorf("%v: Lookup%v: unexpected error: %v", i, testName, err) } @@ -2820,8 +2819,7 @@ func TestLookupOrderFilesNoSuchHost(t *testing.T) { } expectedErr := DNSError{Err: errNoSuchHost.Error(), Name: testName, IsNotFound: true} - var dnsErr *DNSError - errors.As(err, &dnsErr) + dnsErr, _ := errors.AsType[*DNSError](err) if dnsErr == nil || *dnsErr != expectedErr { t.Errorf("Lookup%v: unexpected error: %v", v.name, err) } @@ -2853,8 +2851,7 @@ func TestExtendedRCode(t *testing.T) { r := &Resolver{PreferGo: true, Dial: fake.DialContext} _, _, err := r.tryOneName(context.Background(), getSystemDNSConfig(), "go.dev.", dnsmessage.TypeA) - var dnsErr *DNSError - if !(errors.As(err, &dnsErr) && dnsErr.Err == errServerMisbehaving.Error()) { + if dnsErr, ok := errors.AsType[*DNSError](err); !ok || dnsErr.Err != errServerMisbehaving.Error() { t.Fatalf("r.tryOneName(): unexpected error: %v", err) } } diff --git a/src/net/error_test.go b/src/net/error_test.go index ff254336211..8026144c3da 100644 --- a/src/net/error_test.go +++ b/src/net/error_test.go @@ -155,7 +155,6 @@ func TestDialError(t *testing.T) { d := Dialer{Timeout: someTimeout} for i, tt := range dialErrorTests { - i, tt := i, tt t.Run(fmt.Sprint(i), func(t *testing.T) { c, err := d.Dial(tt.network, tt.address) if err == nil { diff --git a/src/net/http/client.go b/src/net/http/client.go index ba095ea1e34..d6a80107355 100644 --- a/src/net/http/client.go +++ b/src/net/http/client.go @@ -172,8 +172,13 @@ func refererForURL(lastReq, newReq *url.URL, explicitRef string) string { // didTimeout is non-nil only if err != nil. 
func (c *Client) send(req *Request, deadline time.Time) (resp *Response, didTimeout func() bool, err error) { + cookieURL := req.URL + if req.Host != "" { + cookieURL = cloneURL(cookieURL) + cookieURL.Host = req.Host + } if c.Jar != nil { - for _, cookie := range c.Jar.Cookies(req.URL) { + for _, cookie := range c.Jar.Cookies(cookieURL) { req.AddCookie(cookie) } } @@ -183,7 +188,7 @@ func (c *Client) send(req *Request, deadline time.Time) (resp *Response, didTime } if c.Jar != nil { if rc := resp.Cookies(); len(rc) > 0 { - c.Jar.SetCookies(req.URL, rc) + c.Jar.SetCookies(cookieURL, rc) } } return resp, nil, nil @@ -685,8 +690,7 @@ func (c *Client) do(req *Request) (retres *Response, reterr error) { stripSensitiveHeaders = true } } - copyHeaders(req, stripSensitiveHeaders) - + copyHeaders(req, stripSensitiveHeaders, !includeBody) // Add the Referer header from the most recent // request URL to the new one, if it's not https->http: if ref := refererForURL(reqs[len(reqs)-1].URL, req.URL, req.Header.Get("Referer")); ref != "" { @@ -753,7 +757,7 @@ func (c *Client) do(req *Request) (retres *Response, reterr error) { // makeHeadersCopier makes a function that copies headers from the // initial Request, ireq. For every redirect, this function must be called // so that it can copy headers into the upcoming Request. -func (c *Client) makeHeadersCopier(ireq *Request) func(req *Request, stripSensitiveHeaders bool) { +func (c *Client) makeHeadersCopier(ireq *Request) func(req *Request, stripSensitiveHeaders, stripBodyHeaders bool) { // The headers to copy are from the very initial request. // We use a closured callback to keep a reference to these original headers. var ( @@ -767,7 +771,7 @@ func (c *Client) makeHeadersCopier(ireq *Request) func(req *Request, stripSensit } } - return func(req *Request, stripSensitiveHeaders bool) { + return func(req *Request, stripSensitiveHeaders, stripBodyHeaders bool) { // If Jar is present and there was some initial cookies provided // via the request header, then we may need to alter the initial // cookies as we follow redirects since each redirect may end up @@ -805,12 +809,21 @@ func (c *Client) makeHeadersCopier(ireq *Request) func(req *Request, stripSensit // (at least the safe ones). 
for k, vv := range ireqhdr { sensitive := false + body := false switch CanonicalHeaderKey(k) { case "Authorization", "Www-Authenticate", "Cookie", "Cookie2", "Proxy-Authorization", "Proxy-Authenticate": sensitive = true + + case "Content-Encoding", "Content-Language", "Content-Location", + "Content-Type": + // Headers relating to the body, which is removed for + // POST to GET redirects. + // https://fetch.spec.whatwg.org/#http-redirect-fetch + body = true + } - if !(sensitive && stripSensitiveHeaders) { + if !(sensitive && stripSensitiveHeaders) && !(body && stripBodyHeaders) { req.Header[k] = vv } } diff --git a/src/net/http/client_test.go b/src/net/http/client_test.go index 94fddb508e0..d184f720319 100644 --- a/src/net/http/client_test.go +++ b/src/net/http/client_test.go @@ -585,6 +585,36 @@ var echoCookiesRedirectHandler = HandlerFunc(func(w ResponseWriter, r *Request) { } }) +func TestHostMismatchCookies(t *testing.T) { run(t, testHostMismatchCookies) } +func testHostMismatchCookies(t *testing.T, mode testMode) { + ts := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) { + for _, c := range r.Cookies() { + c.Value = "SetOnServer" + SetCookie(w, c) + } + })).ts + + reqURL, _ := url.Parse(ts.URL) + hostURL := *reqURL + hostURL.Host = "cookies.example.com" + + c := ts.Client() + c.Jar = new(TestJar) + c.Jar.SetCookies(reqURL, []*Cookie{{Name: "First", Value: "SetOnClient"}}) + c.Jar.SetCookies(&hostURL, []*Cookie{{Name: "Second", Value: "SetOnClient"}}) + + req, _ := NewRequest("GET", ts.URL, NoBody) + req.Host = hostURL.Host + resp, err := c.Do(req) + if err != nil { + t.Fatalf("Get: %v", err) + } + resp.Body.Close() + + matchReturnedCookies(t, []*Cookie{{Name: "First", Value: "SetOnClient"}}, c.Jar.Cookies(reqURL)) + matchReturnedCookies(t, []*Cookie{{Name: "Second", Value: "SetOnServer"}}, c.Jar.Cookies(&hostURL)) +} + func TestClientSendsCookieFromJar(t *testing.T) { defer afterTest(t) tr := &recordingTransport{} @@ -1591,6 +1621,39 @@ func testClientStripHeadersOnRepeatedRedirect(t *testing.T, mode testMode) { } } +func TestClientStripHeadersOnPostToGetRedirect(t *testing.T) { + run(t, testClientStripHeadersOnPostToGetRedirect) +} +func testClientStripHeadersOnPostToGetRedirect(t *testing.T, mode testMode) { + ts := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) { + if r.Method == "POST" { + Redirect(w, r, "/redirected", StatusFound) + return + } else if r.Method != "GET" { + t.Errorf("unexpected request method: %v", r.Method) + return + } + for key, val := range r.Header { + if strings.HasPrefix(key, "Content-") { + t.Errorf("unexpected request body header after redirect: %v: %v", key, val) + } + } + })).ts + + c := ts.Client() + + req, _ := NewRequest("POST", ts.URL, strings.NewReader("hello world")) + req.Header.Set("Content-Encoding", "a") + req.Header.Set("Content-Language", "b") + req.Header.Set("Content-Length", "c") + req.Header.Set("Content-Type", "d") + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() +} + // Issue 22233: copy host when Client follows a relative redirect. 
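(Aside, before the Issue 22233 test below: a usage sketch of the behavior exercised by testClientStripHeadersOnPostToGetRedirect above. It assumes srv is an httptest.Server whose handler answers POST with a 302; the Content-* headers named in the fetch spec are dropped on the redirected GET, while unrelated headers are copied through.)

func postThatRedirects(srv *httptest.Server) (*http.Response, error) {
	req, err := http.NewRequest("POST", srv.URL, strings.NewReader("hello"))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "text/plain") // dropped on the POST -> GET hop
	req.Header.Set("X-Token", "opaque")          // unrelated headers survive the redirect
	return srv.Client().Do(req)                  // redirected GET carries no Content-* headers
}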
func TestClientCopyHostOnRedirect(t *testing.T) { run(t, testClientCopyHostOnRedirect) } func testClientCopyHostOnRedirect(t *testing.T, mode testMode) { diff --git a/src/net/http/cookie.go b/src/net/http/cookie.go index efe6cc3e77e..f74bc1043c5 100644 --- a/src/net/http/cookie.go +++ b/src/net/http/cookie.go @@ -7,6 +7,7 @@ package http import ( "errors" "fmt" + "internal/godebug" "log" "net" "net/http/internal/ascii" @@ -16,6 +17,8 @@ import ( "time" ) +var httpcookiemaxnum = godebug.New("httpcookiemaxnum") + // A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an // HTTP response or the Cookie header of an HTTP request. // @@ -58,16 +61,37 @@ const ( ) var ( - errBlankCookie = errors.New("http: blank cookie") - errEqualNotFoundInCookie = errors.New("http: '=' not found in cookie") - errInvalidCookieName = errors.New("http: invalid cookie name") - errInvalidCookieValue = errors.New("http: invalid cookie value") + errBlankCookie = errors.New("http: blank cookie") + errEqualNotFoundInCookie = errors.New("http: '=' not found in cookie") + errInvalidCookieName = errors.New("http: invalid cookie name") + errInvalidCookieValue = errors.New("http: invalid cookie value") + errCookieNumLimitExceeded = errors.New("http: number of cookies exceeded limit") ) +const defaultCookieMaxNum = 3000 + +func cookieNumWithinMax(cookieNum int) bool { + withinDefaultMax := cookieNum <= defaultCookieMaxNum + if httpcookiemaxnum.Value() == "" { + return withinDefaultMax + } + if customMax, err := strconv.Atoi(httpcookiemaxnum.Value()); err == nil { + withinCustomMax := customMax == 0 || cookieNum <= customMax + if withinDefaultMax != withinCustomMax { + httpcookiemaxnum.IncNonDefault() + } + return withinCustomMax + } + return withinDefaultMax +} + // ParseCookie parses a Cookie header value and returns all the cookies // which were set in it. Since the same cookie name can appear multiple times, // the returned slice can contain more than one cookie with a given name. func ParseCookie(line string) ([]*Cookie, error) { + if !cookieNumWithinMax(strings.Count(line, ";") + 1) { + return nil, errCookieNumLimitExceeded + } parts := strings.Split(textproto.TrimString(line), ";") if len(parts) == 1 && parts[0] == "" { return nil, errBlankCookie @@ -197,11 +221,21 @@ func ParseSetCookie(line string) (*Cookie, error) { // readSetCookies parses all "Set-Cookie" values from // the header h and returns the successfully parsed Cookies. +// +// If the number of cookies exceeds defaultCookieMaxNum, and the +// httpcookiemaxnum GODEBUG option is not explicitly turned off, this function +// will silently fail and return an empty slice. func readSetCookies(h Header) []*Cookie { cookieCount := len(h["Set-Cookie"]) if cookieCount == 0 { return []*Cookie{} } + // The cookie limit was unfortunately introduced at a later point in time. + // As such, we can only fail by returning an empty slice rather than an + // explicit error. + if !cookieNumWithinMax(cookieCount) { + return []*Cookie{} + } cookies := make([]*Cookie, 0, cookieCount) for _, line := range h["Set-Cookie"] { if cookie, err := ParseSetCookie(line); err == nil { @@ -329,13 +363,28 @@ func (c *Cookie) Valid() error { // readCookies parses all "Cookie" values from the header h and // returns the successfully parsed Cookies. // -// if filter isn't empty, only cookies of that name are returned. +// If filter isn't empty, only cookies of that name are returned. 
+// +// If the number of cookies exceeds defaultCookieMaxNum, and the +// httpcookiemaxnum GODEBUG option is not explicitly turned off, this function +// will silently fail and return an empty slice. func readCookies(h Header, filter string) []*Cookie { lines := h["Cookie"] if len(lines) == 0 { return []*Cookie{} } + // The cookie limit was unfortunately introduced at a later point in time. + // As such, we can only fail by returning an empty slice rather than an + // explicit error. + cookieCount := 0 + for _, line := range lines { + cookieCount += strings.Count(line, ";") + 1 + } + if !cookieNumWithinMax(cookieCount) { + return []*Cookie{} + } + cookies := make([]*Cookie, 0, len(lines)+strings.Count(lines[0], ";")) for _, line := range lines { line = textproto.TrimString(line) diff --git a/src/net/http/cookie_test.go b/src/net/http/cookie_test.go index 8db4957b2cc..f452b4ec768 100644 --- a/src/net/http/cookie_test.go +++ b/src/net/http/cookie_test.go @@ -11,6 +11,7 @@ import ( "log" "os" "reflect" + "slices" "strings" "testing" "time" @@ -255,16 +256,17 @@ func TestAddCookie(t *testing.T) { } var readSetCookiesTests = []struct { - Header Header - Cookies []*Cookie + header Header + cookies []*Cookie + godebug string }{ { - Header{"Set-Cookie": {"Cookie-1=v$1"}}, - []*Cookie{{Name: "Cookie-1", Value: "v$1", Raw: "Cookie-1=v$1"}}, + header: Header{"Set-Cookie": {"Cookie-1=v$1"}}, + cookies: []*Cookie{{Name: "Cookie-1", Value: "v$1", Raw: "Cookie-1=v$1"}}, }, { - Header{"Set-Cookie": {"NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly"}}, + cookies: []*Cookie{{ Name: "NID", Value: "99=YsDT5i3E-CXax-", Path: "/", Domain: ".google.ch", HttpOnly: true, Expires: time.Date(2011, 11, 23, 1, 5, 3, 0, time.UTC), RawExpires: "Wed, 23-Nov-2011 01:05:03 GMT", Raw: "NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly", }}, }, { - Header{"Set-Cookie": {".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly"}}, + cookies: []*Cookie{{ Name: ".ASPXAUTH", Value: "7E3AA", Path: "/", Expires: time.Date(2012, 3, 7, 14, 25, 6, 0, time.UTC), RawExpires: "Wed, 07-Mar-2012 14:25:06 GMT", HttpOnly: true, Raw: ".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly", }}, }, { - Header{"Set-Cookie": {"ASP.NET_SessionId=foo; path=/; HttpOnly"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"ASP.NET_SessionId=foo; path=/; HttpOnly"}}, + cookies: []*Cookie{{ Name: "ASP.NET_SessionId", Value: "foo", Path: "/", HttpOnly: true, Raw: "ASP.NET_SessionId=foo; path=/; HttpOnly", }}, }, { - Header{"Set-Cookie": {"samesitedefault=foo; SameSite"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"samesitedefault=foo; SameSite"}}, + cookies: []*Cookie{{ Name: "samesitedefault", Value: "foo", SameSite: SameSiteDefaultMode, Raw: "samesitedefault=foo; SameSite", }}, }, { - Header{"Set-Cookie": {"samesiteinvalidisdefault=foo; SameSite=invalid"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"samesiteinvalidisdefault=foo; SameSite=invalid"}}, + cookies: []*Cookie{{ Name: "samesiteinvalidisdefault", Value: "foo", SameSite: SameSiteDefaultMode, Raw: "samesiteinvalidisdefault=foo; SameSite=invalid", }}, }, { - Header{"Set-Cookie": {"samesitelax=foo; SameSite=Lax"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"samesitelax=foo; SameSite=Lax"}}, + cookies: []*Cookie{{ Name: "samesitelax", Value: "foo", SameSite: SameSiteLaxMode, Raw: "samesitelax=foo; SameSite=Lax", }}, }, { - Header{"Set-Cookie": {"samesitestrict=foo; SameSite=Strict"}}, - 
[]*Cookie{{ + header: Header{"Set-Cookie": {"samesitestrict=foo; SameSite=Strict"}}, + cookies: []*Cookie{{ Name: "samesitestrict", Value: "foo", SameSite: SameSiteStrictMode, @@ -334,8 +336,8 @@ var readSetCookiesTests = []struct { }}, }, { - Header{"Set-Cookie": {"samesitenone=foo; SameSite=None"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"samesitenone=foo; SameSite=None"}}, + cookies: []*Cookie{{ Name: "samesitenone", Value: "foo", SameSite: SameSiteNoneMode, @@ -345,47 +347,66 @@ var readSetCookiesTests = []struct { // Make sure we can properly read back the Set-Cookie headers we create // for values containing spaces or commas: { - Header{"Set-Cookie": {`special-1=a z`}}, - []*Cookie{{Name: "special-1", Value: "a z", Raw: `special-1=a z`}}, + header: Header{"Set-Cookie": {`special-1=a z`}}, + cookies: []*Cookie{{Name: "special-1", Value: "a z", Raw: `special-1=a z`}}, }, { - Header{"Set-Cookie": {`special-2=" z"`}}, - []*Cookie{{Name: "special-2", Value: " z", Quoted: true, Raw: `special-2=" z"`}}, + header: Header{"Set-Cookie": {`special-2=" z"`}}, + cookies: []*Cookie{{Name: "special-2", Value: " z", Quoted: true, Raw: `special-2=" z"`}}, }, { - Header{"Set-Cookie": {`special-3="a "`}}, - []*Cookie{{Name: "special-3", Value: "a ", Quoted: true, Raw: `special-3="a "`}}, + header: Header{"Set-Cookie": {`special-3="a "`}}, + cookies: []*Cookie{{Name: "special-3", Value: "a ", Quoted: true, Raw: `special-3="a "`}}, }, { - Header{"Set-Cookie": {`special-4=" "`}}, - []*Cookie{{Name: "special-4", Value: " ", Quoted: true, Raw: `special-4=" "`}}, + header: Header{"Set-Cookie": {`special-4=" "`}}, + cookies: []*Cookie{{Name: "special-4", Value: " ", Quoted: true, Raw: `special-4=" "`}}, }, { - Header{"Set-Cookie": {`special-5=a,z`}}, - []*Cookie{{Name: "special-5", Value: "a,z", Raw: `special-5=a,z`}}, + header: Header{"Set-Cookie": {`special-5=a,z`}}, + cookies: []*Cookie{{Name: "special-5", Value: "a,z", Raw: `special-5=a,z`}}, }, { - Header{"Set-Cookie": {`special-6=",z"`}}, - []*Cookie{{Name: "special-6", Value: ",z", Quoted: true, Raw: `special-6=",z"`}}, + header: Header{"Set-Cookie": {`special-6=",z"`}}, + cookies: []*Cookie{{Name: "special-6", Value: ",z", Quoted: true, Raw: `special-6=",z"`}}, }, { - Header{"Set-Cookie": {`special-7=a,`}}, - []*Cookie{{Name: "special-7", Value: "a,", Raw: `special-7=a,`}}, + header: Header{"Set-Cookie": {`special-7=a,`}}, + cookies: []*Cookie{{Name: "special-7", Value: "a,", Raw: `special-7=a,`}}, }, { - Header{"Set-Cookie": {`special-8=","`}}, - []*Cookie{{Name: "special-8", Value: ",", Quoted: true, Raw: `special-8=","`}}, + header: Header{"Set-Cookie": {`special-8=","`}}, + cookies: []*Cookie{{Name: "special-8", Value: ",", Quoted: true, Raw: `special-8=","`}}, }, // Make sure we can properly read back the Set-Cookie headers // for names containing spaces: { - Header{"Set-Cookie": {`special-9 =","`}}, - []*Cookie{{Name: "special-9", Value: ",", Quoted: true, Raw: `special-9 =","`}}, + header: Header{"Set-Cookie": {`special-9 =","`}}, + cookies: []*Cookie{{Name: "special-9", Value: ",", Quoted: true, Raw: `special-9 =","`}}, }, // Quoted values (issue #46443) { - Header{"Set-Cookie": {`cookie="quoted"`}}, - []*Cookie{{Name: "cookie", Value: "quoted", Quoted: true, Raw: `cookie="quoted"`}}, + header: Header{"Set-Cookie": {`cookie="quoted"`}}, + cookies: []*Cookie{{Name: "cookie", Value: "quoted", Quoted: true, Raw: `cookie="quoted"`}}, + }, + { + header: Header{"Set-Cookie": slices.Repeat([]string{"a="}, defaultCookieMaxNum+1)}, + cookies: 
[]*Cookie{}, + }, + { + header: Header{"Set-Cookie": slices.Repeat([]string{"a="}, 10)}, + cookies: []*Cookie{}, + godebug: "httpcookiemaxnum=5", + }, + { + header: Header{"Set-Cookie": strings.Split(strings.Repeat(";a=", defaultCookieMaxNum+1)[1:], ";")}, + cookies: slices.Repeat([]*Cookie{{Name: "a", Value: "", Quoted: false, Raw: "a="}}, defaultCookieMaxNum+1), + godebug: "httpcookiemaxnum=0", + }, + { + header: Header{"Set-Cookie": strings.Split(strings.Repeat(";a=", defaultCookieMaxNum+1)[1:], ";")}, + cookies: slices.Repeat([]*Cookie{{Name: "a", Value: "", Quoted: false, Raw: "a="}}, defaultCookieMaxNum+1), + godebug: fmt.Sprintf("httpcookiemaxnum=%v", defaultCookieMaxNum+1), }, // TODO(bradfitz): users have reported seeing this in the @@ -405,79 +426,103 @@ func toJSON(v any) string { func TestReadSetCookies(t *testing.T) { for i, tt := range readSetCookiesTests { + t.Setenv("GODEBUG", tt.godebug) for n := 0; n < 2; n++ { // to verify readSetCookies doesn't mutate its input - c := readSetCookies(tt.Header) - if !reflect.DeepEqual(c, tt.Cookies) { - t.Errorf("#%d readSetCookies: have\n%s\nwant\n%s\n", i, toJSON(c), toJSON(tt.Cookies)) + c := readSetCookies(tt.header) + if !reflect.DeepEqual(c, tt.cookies) { + t.Errorf("#%d readSetCookies: have\n%s\nwant\n%s\n", i, toJSON(c), toJSON(tt.cookies)) } } } } var readCookiesTests = []struct { - Header Header - Filter string - Cookies []*Cookie + header Header + filter string + cookies []*Cookie + godebug string }{ { - Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}}, - "", - []*Cookie{ + header: Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}}, + filter: "", + cookies: []*Cookie{ {Name: "Cookie-1", Value: "v$1"}, {Name: "c2", Value: "v2"}, }, }, { - Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}}, - "c2", - []*Cookie{ + header: Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}}, + filter: "c2", + cookies: []*Cookie{ {Name: "c2", Value: "v2"}, }, }, { - Header{"Cookie": {"Cookie-1=v$1; c2=v2"}}, - "", - []*Cookie{ + header: Header{"Cookie": {"Cookie-1=v$1; c2=v2"}}, + filter: "", + cookies: []*Cookie{ {Name: "Cookie-1", Value: "v$1"}, {Name: "c2", Value: "v2"}, }, }, { - Header{"Cookie": {"Cookie-1=v$1; c2=v2"}}, - "c2", - []*Cookie{ + header: Header{"Cookie": {"Cookie-1=v$1; c2=v2"}}, + filter: "c2", + cookies: []*Cookie{ {Name: "c2", Value: "v2"}, }, }, { - Header{"Cookie": {`Cookie-1="v$1"; c2="v2"`}}, - "", - []*Cookie{ + header: Header{"Cookie": {`Cookie-1="v$1"; c2="v2"`}}, + filter: "", + cookies: []*Cookie{ {Name: "Cookie-1", Value: "v$1", Quoted: true}, {Name: "c2", Value: "v2", Quoted: true}, }, }, { - Header{"Cookie": {`Cookie-1="v$1"; c2=v2;`}}, - "", - []*Cookie{ + header: Header{"Cookie": {`Cookie-1="v$1"; c2=v2;`}}, + filter: "", + cookies: []*Cookie{ {Name: "Cookie-1", Value: "v$1", Quoted: true}, {Name: "c2", Value: "v2"}, }, }, { - Header{"Cookie": {``}}, - "", - []*Cookie{}, + header: Header{"Cookie": {``}}, + filter: "", + cookies: []*Cookie{}, + }, + // GODEBUG=httpcookiemaxnum should work regardless of whether all cookies + // are sent via one "Cookie" field, or multiple fields. 
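(Aside, before the remaining table entries: a small demo of the knob these cases cover. It relies on internal/godebug picking up os.Setenv updates at runtime, which is also what the t.Setenv-based tests here depend on; demoCookieLimit is a hypothetical function.)

func demoCookieLimit() {
	os.Setenv("GODEBUG", "httpcookiemaxnum=2")
	req := &http.Request{Header: http.Header{"Cookie": {"a=1; b=2; c=3"}}}
	fmt.Println(len(req.Cookies())) // 0: three cookies exceed the limit, so parsing fails silently

	os.Setenv("GODEBUG", "httpcookiemaxnum=0") // 0 removes the cap (the default is 3000)
	fmt.Println(len(req.Cookies())) // 3
}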
+ { + header: Header{"Cookie": {strings.Repeat(";a=", defaultCookieMaxNum+1)[1:]}}, + cookies: []*Cookie{}, + }, + { + header: Header{"Cookie": slices.Repeat([]string{"a="}, 10)}, + cookies: []*Cookie{}, + godebug: "httpcookiemaxnum=5", + }, + { + header: Header{"Cookie": {strings.Repeat(";a=", defaultCookieMaxNum+1)[1:]}}, + cookies: slices.Repeat([]*Cookie{{Name: "a", Value: "", Quoted: false}}, defaultCookieMaxNum+1), + godebug: "httpcookiemaxnum=0", + }, + { + header: Header{"Cookie": slices.Repeat([]string{"a="}, defaultCookieMaxNum+1)}, + cookies: slices.Repeat([]*Cookie{{Name: "a", Value: "", Quoted: false}}, defaultCookieMaxNum+1), + godebug: fmt.Sprintf("httpcookiemaxnum=%v", defaultCookieMaxNum+1), }, } func TestReadCookies(t *testing.T) { for i, tt := range readCookiesTests { + t.Setenv("GODEBUG", tt.godebug) for n := 0; n < 2; n++ { // to verify readCookies doesn't mutate its input - c := readCookies(tt.Header, tt.Filter) - if !reflect.DeepEqual(c, tt.Cookies) { - t.Errorf("#%d readCookies:\nhave: %s\nwant: %s\n", i, toJSON(c), toJSON(tt.Cookies)) + c := readCookies(tt.header, tt.filter) + if !reflect.DeepEqual(c, tt.cookies) { + t.Errorf("#%d readCookies:\nhave: %s\nwant: %s\n", i, toJSON(c), toJSON(tt.cookies)) } } } @@ -690,6 +735,7 @@ func TestParseCookie(t *testing.T) { line string cookies []*Cookie err error + godebug string }{ { line: "Cookie-1=v$1", @@ -723,8 +769,28 @@ func TestParseCookie(t *testing.T) { line: "k1=\\", err: errInvalidCookieValue, }, + { + line: strings.Repeat(";a=", defaultCookieMaxNum+1)[1:], + err: errCookieNumLimitExceeded, + }, + { + line: strings.Repeat(";a=", 10)[1:], + err: errCookieNumLimitExceeded, + godebug: "httpcookiemaxnum=5", + }, + { + line: strings.Repeat(";a=", defaultCookieMaxNum+1)[1:], + cookies: slices.Repeat([]*Cookie{{Name: "a", Value: "", Quoted: false}}, defaultCookieMaxNum+1), + godebug: "httpcookiemaxnum=0", + }, + { + line: strings.Repeat(";a=", defaultCookieMaxNum+1)[1:], + cookies: slices.Repeat([]*Cookie{{Name: "a", Value: "", Quoted: false}}, defaultCookieMaxNum+1), + godebug: fmt.Sprintf("httpcookiemaxnum=%v", defaultCookieMaxNum+1), + }, } for i, tt := range tests { + t.Setenv("GODEBUG", tt.godebug) gotCookies, gotErr := ParseCookie(tt.line) if !errors.Is(gotErr, tt.err) { t.Errorf("#%d ParseCookie got error %v, want error %v", i, gotErr, tt.err) diff --git a/src/net/http/doc.go b/src/net/http/doc.go index f7ad3ae762f..24e07352ca7 100644 --- a/src/net/http/doc.go +++ b/src/net/http/doc.go @@ -84,27 +84,26 @@ custom Server: # HTTP/2 -Starting with Go 1.6, the http package has transparent support for the -HTTP/2 protocol when using HTTPS. Programs that must disable HTTP/2 -can do so by setting [Transport.TLSNextProto] (for clients) or -[Server.TLSNextProto] (for servers) to a non-nil, empty -map. Alternatively, the following GODEBUG settings are -currently supported: +The http package has transparent support for the HTTP/2 protocol. + +[Server] and [DefaultTransport] automatically enable HTTP/2 support +when using HTTPS. [Transport] does not enable HTTP/2 by default. + +To enable or disable support for HTTP/1, HTTP/2, and/or unencrypted HTTP/2, +see the [Server.Protocols] and [Transport.Protocols] configuration fields. + +To configure advanced HTTP/2 features, see the [Server.HTTP2] and +[Transport.HTTP2] configuration fields. 
+ +Alternatively, the following GODEBUG settings are currently supported: GODEBUG=http2client=0 # disable HTTP/2 client support GODEBUG=http2server=0 # disable HTTP/2 server support GODEBUG=http2debug=1 # enable verbose HTTP/2 debug logs GODEBUG=http2debug=2 # ... even more verbose, with frame dumps -Please report any issues before disabling HTTP/2 support: https://golang.org/s/http2bug - -The http package's [Transport] and [Server] both automatically enable -HTTP/2 support for simple configurations. To enable HTTP/2 for more -complex configurations, to use lower-level HTTP/2 features, or to use -a newer version of Go's http2 package, import "golang.org/x/net/http2" -directly and use its ConfigureTransport and/or ConfigureServer -functions. Manually configuring HTTP/2 via the golang.org/x/net/http2 -package takes precedence over the net/http package's built-in HTTP/2 -support. +The "omithttp2" build tag may be used to disable the HTTP/2 implementation +contained in the http package. */ package http diff --git a/src/net/http/fs_test.go b/src/net/http/fs_test.go index 9b34ad080ef..32fb696fee7 100644 --- a/src/net/http/fs_test.go +++ b/src/net/http/fs_test.go @@ -1540,7 +1540,6 @@ func testServeFileRejectsInvalidSuffixLengths(t *testing.T, mode testMode) { } for _, tt := range tests { - tt := tt t.Run(tt.r, func(t *testing.T) { req, err := NewRequest("GET", cst.URL+"/index.html", nil) if err != nil { diff --git a/src/net/http/h2_bundle.go b/src/net/http/h2_bundle.go index f09e102efb7..0df276321cf 100644 --- a/src/net/http/h2_bundle.go +++ b/src/net/http/h2_bundle.go @@ -11695,6 +11695,12 @@ func (ws *http2priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, prio q.prev.next = q q.next.prev = q } + + // Update the metadata. + ws.streams[streamID] = http2streamMetadata{ + location: q, + priority: priority, + } } func (ws *http2priorityWriteSchedulerRFC9218) Push(wr http2FrameWriteRequest) { diff --git a/src/net/http/h2_error_test.go b/src/net/http/h2_error_test.go index 5e400683b41..e71825451a8 100644 --- a/src/net/http/h2_error_test.go +++ b/src/net/http/h2_error_test.go @@ -25,19 +25,18 @@ func (e externalStreamError) Error() string { } func TestStreamError(t *testing.T) { - var target externalStreamError streamErr := http2streamError(42, http2ErrCodeProtocol) - ok := errors.As(streamErr, &target) + extStreamErr, ok := errors.AsType[externalStreamError](streamErr) if !ok { - t.Fatalf("errors.As failed") + t.Fatalf("errors.AsType failed") } - if target.StreamID != streamErr.StreamID { - t.Errorf("got StreamID %v, expected %v", target.StreamID, streamErr.StreamID) + if extStreamErr.StreamID != streamErr.StreamID { + t.Errorf("got StreamID %v, expected %v", extStreamErr.StreamID, streamErr.StreamID) } - if target.Cause != streamErr.Cause { - t.Errorf("got Cause %v, expected %v", target.Cause, streamErr.Cause) + if extStreamErr.Cause != streamErr.Cause { + t.Errorf("got Cause %v, expected %v", extStreamErr.Cause, streamErr.Cause) } - if uint32(target.Code) != uint32(streamErr.Code) { - t.Errorf("got Code %v, expected %v", target.Code, streamErr.Code) + if uint32(extStreamErr.Code) != uint32(streamErr.Code) { + t.Errorf("got Code %v, expected %v", extStreamErr.Code, streamErr.Code) } } diff --git a/src/net/http/httptest/recorder.go b/src/net/http/httptest/recorder.go index 17aa70f0676..4006f4406d2 100644 --- a/src/net/http/httptest/recorder.go +++ b/src/net/http/httptest/recorder.go @@ -105,23 +105,45 @@ func (rw *ResponseRecorder) writeHeader(b []byte, str string) { // Write implements 
http.ResponseWriter. The data in buf is written to // rw.Body, if not nil. func (rw *ResponseRecorder) Write(buf []byte) (int, error) { + // Record the write, even if we're going to return an error. rw.writeHeader(buf, "") if rw.Body != nil { rw.Body.Write(buf) } + if !bodyAllowedForStatus(rw.Code) { + return 0, http.ErrBodyNotAllowed + } return len(buf), nil } // WriteString implements [io.StringWriter]. The data in str is written // to rw.Body, if not nil. func (rw *ResponseRecorder) WriteString(str string) (int, error) { + // Record the write, even if we're going to return an error. rw.writeHeader(nil, str) if rw.Body != nil { rw.Body.WriteString(str) } + if !bodyAllowedForStatus(rw.Code) { + return 0, http.ErrBodyNotAllowed + } return len(str), nil } +// bodyAllowedForStatus reports whether a given response status code +// permits a body. See RFC 7230, section 3.3. +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + func checkWriteHeaderCode(code int) { // Issue 22880: require valid WriteHeader status codes. // For now we only enforce that it's three digits. diff --git a/src/net/http/httptest/recorder_test.go b/src/net/http/httptest/recorder_test.go index 4782eced43e..9d1c4430c9b 100644 --- a/src/net/http/httptest/recorder_test.go +++ b/src/net/http/httptest/recorder_test.go @@ -5,6 +5,8 @@ package httptest import ( + "bytes" + "errors" "fmt" "io" "net/http" @@ -309,6 +311,26 @@ func TestRecorder(t *testing.T) { } } +func TestBodyNotAllowed(t *testing.T) { + rw := NewRecorder() + rw.Body = new(bytes.Buffer) + rw.WriteHeader(204) + + _, err := rw.Write([]byte("hello ")) + if !errors.Is(err, http.ErrBodyNotAllowed) { + t.Errorf("expected BodyNotAllowed for Write after 204, got: %v", err) + } + + _, err = rw.WriteString("world") + if !errors.Is(err, http.ErrBodyNotAllowed) { + t.Errorf("expected BodyNotAllowed for WriteString after 204, got: %v", err) + } + + if got, want := rw.Body.String(), "hello world"; got != want { + t.Errorf("got Body=%q, want %q", got, want) + } +} + // issue 39017 - disallow Content-Length values such as "+3" func TestParseContentLength(t *testing.T) { tests := []struct { @@ -352,7 +374,6 @@ func TestRecorderPanicsOnNonXXXStatusCode(t *testing.T) { -100, 0, 99, 1000, 20000, } for _, badCode := range badCodes { - badCode := badCode t.Run(fmt.Sprintf("Code=%d", badCode), func(t *testing.T) { defer func() { if r := recover(); r == nil { diff --git a/src/net/http/pprof/pprof.go b/src/net/http/pprof/pprof.go index e5a46ed253c..71aade67d32 100644 --- a/src/net/http/pprof/pprof.go +++ b/src/net/http/pprof/pprof.go @@ -352,13 +352,13 @@ func collectProfile(p *pprof.Profile) (*profile.Profile, error) { } var profileSupportsDelta = map[handler]bool{ - "allocs": true, - "block": true, - "goroutineleak": true, - "goroutine": true, - "heap": true, - "mutex": true, - "threadcreate": true, + "allocs": true, + "block": true, + "goroutineleak": true, + "goroutine": true, + "heap": true, + "mutex": true, + "threadcreate": true, } var profileDescriptions = map[string]string{ diff --git a/src/net/http/requestwrite_test.go b/src/net/http/requestwrite_test.go index 380ae9dec32..8b097cd5e15 100644 --- a/src/net/http/requestwrite_test.go +++ b/src/net/http/requestwrite_test.go @@ -15,6 +15,7 @@ import ( "strings" "testing" "testing/iotest" + "testing/synctest" "time" ) @@ -667,6 +668,13 @@ func TestRequestWrite(t *testing.T) { func 
TestRequestWriteTransport(t *testing.T) { t.Parallel() + // Run this test in a synctest bubble, since it relies on the transport + // successfully probing the request body within 200ms + // (see transferWriter.probeRequestBody). + // This occasionally flakes on slow builders (#52575) if we don't use a fake clock. + synctest.Test(t, testRequestWriteTransport) +} +func testRequestWriteTransport(t *testing.T) { matchSubstr := func(substr string) func(string) error { return func(written string) error { if !strings.Contains(written, substr) { diff --git a/src/net/http/serve_test.go b/src/net/http/serve_test.go index aee6288f3b2..4a16ba02af6 100644 --- a/src/net/http/serve_test.go +++ b/src/net/http/serve_test.go @@ -6701,7 +6701,6 @@ func testTimeoutHandlerSuperfluousLogs(t *testing.T, mode testMode) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { exitHandler := make(chan bool, 1) defer close(exitHandler) diff --git a/src/net/http/server.go b/src/net/http/server.go index 4078c899061..02554d1a201 100644 --- a/src/net/http/server.go +++ b/src/net/http/server.go @@ -3066,6 +3066,9 @@ type Server struct { // automatically closed when the function returns. // If TLSNextProto is not nil, HTTP/2 support is not enabled // automatically. + // + // Historically, TLSNextProto was used to disable HTTP/2 support. + // The Server.Protocols field now provides a simpler way to do this. TLSNextProto map[string]func(*Server, *tls.Conn, Handler) // ConnState specifies an optional callback function that is @@ -3094,9 +3097,6 @@ type Server struct { ConnContext func(ctx context.Context, c net.Conn) context.Context // HTTP2 configures HTTP/2 connections. - // - // This field does not yet have any effect. - // See https://go.dev/issue/67813. HTTP2 *HTTP2Config // Protocols is the set of protocols accepted by the server. diff --git a/src/net/http/transport.go b/src/net/http/transport.go index 5cef9be487a..a560765d331 100644 --- a/src/net/http/transport.go +++ b/src/net/http/transport.go @@ -249,6 +249,9 @@ type Transport struct { // must return a RoundTripper that then handles the request. // If TLSNextProto is not nil, HTTP/2 support is not enabled // automatically. + // + // Historically, TLSNextProto was used to disable HTTP/2 support. + // The Transport.Protocols field now provides a simpler way to do this. TLSNextProto map[string]func(authority string, c *tls.Conn) RoundTripper // ProxyConnectHeader optionally specifies headers to send to @@ -296,9 +299,6 @@ type Transport struct { ForceAttemptHTTP2 bool // HTTP2 configures HTTP/2 connections. - // - // This field does not yet have any effect. - // See https://go.dev/issue/67813. HTTP2 *HTTP2Config // Protocols is the set of protocols supported by the transport. 
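Both TLSNextProto comment updates above steer readers toward the Protocols field as the simpler way to disable HTTP/2. A minimal sketch of that approach, assuming Protocols is the net/http value type with SetHTTP1/SetHTTP2 setters:

	package main

	import "net/http"

	func main() {
		// Enable only HTTP/1; HTTP/2 stays off because SetHTTP2 is never called.
		var protos http.Protocols
		protos.SetHTTP1(true)

		srv := &http.Server{Addr: ":8443", Protocols: &protos} // server side
		tr := &http.Transport{Protocols: &protos}              // client side
		_, _ = srv, tr
	}

Unlike clearing TLSNextProto, this keeps the protocol choice explicit and applies symmetrically to client and server configurations.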
diff --git a/src/net/http/transport_test.go b/src/net/http/transport_test.go index 75dbd25d225..8ab4107fb7b 100644 --- a/src/net/http/transport_test.go +++ b/src/net/http/transport_test.go @@ -4526,7 +4526,6 @@ func TestTransportContentEncodingCaseInsensitive(t *testing.T) { } func testTransportContentEncodingCaseInsensitive(t *testing.T, mode testMode) { for _, ce := range []string{"gzip", "GZIP"} { - ce := ce t.Run(ce, func(t *testing.T) { const encodedString = "Hello Gopher" ts := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) { diff --git a/src/net/interface.go b/src/net/interface.go index b6057780c4a..5ae3a3a149d 100644 --- a/src/net/interface.go +++ b/src/net/interface.go @@ -6,7 +6,7 @@ package net import ( "errors" - "internal/itoa" + "internal/strconv" "sync" "time" _ "unsafe" @@ -246,7 +246,7 @@ func (zc *ipv6ZoneCache) name(index int) string { zoneCache.RUnlock() } if !ok { // last resort - name = itoa.Uitoa(uint(index)) + name = strconv.Itoa(index) } return name } diff --git a/src/net/interface_plan9.go b/src/net/interface_plan9.go index 93c783b56ee..88f1325ab22 100644 --- a/src/net/interface_plan9.go +++ b/src/net/interface_plan9.go @@ -6,7 +6,7 @@ package net import ( "errors" - "internal/itoa" + "internal/strconv" "internal/stringslite" "os" ) @@ -40,8 +40,8 @@ func interfaceTable(ifindex int) ([]Interface, error) { func readInterface(i int) (*Interface, error) { ifc := &Interface{ - Index: i + 1, // Offset the index by one to suit the contract - Name: netdir + "/ipifc/" + itoa.Itoa(i), // Name is the full path to the interface path in plan9 + Index: i + 1, // Offset the index by one to suit the contract + Name: netdir + "/ipifc/" + strconv.Itoa(i), // Name is the full path to the interface path in plan9 } ifcstat := ifc.Name + "/status" diff --git a/src/net/ip.go b/src/net/ip.go index e3ee6ca70a3..7d58e89e112 100644 --- a/src/net/ip.go +++ b/src/net/ip.go @@ -14,7 +14,7 @@ package net import ( "internal/bytealg" - "internal/itoa" + "internal/strconv" "internal/stringslite" "net/netip" ) @@ -515,7 +515,7 @@ func (n *IPNet) String() string { if l == -1 { return nn.String() + "/" + m.String() } - return nn.String() + "/" + itoa.Uitoa(uint(l)) + return nn.String() + "/" + strconv.Itoa(l) } // ParseIP parses s as an IP address, returning the result. diff --git a/src/net/ipsock_plan9.go b/src/net/ipsock_plan9.go index 6ae9cf3cc10..2bd5071d56a 100644 --- a/src/net/ipsock_plan9.go +++ b/src/net/ipsock_plan9.go @@ -7,10 +7,9 @@ package net import ( "context" "internal/bytealg" - "internal/itoa" + "internal/strconv" "io/fs" "os" - "strconv" "syscall" ) @@ -338,9 +337,9 @@ func plan9LocalAddr(addr Addr) string { if port == 0 { return "" } - return itoa.Itoa(port) + return strconv.Itoa(port) } - return ip.String() + "!" + itoa.Itoa(port) + return ip.String() + "!" 
+ strconv.Itoa(port) } func hangupCtlWrite(ctx context.Context, proto string, ctl *os.File, msg string) error { diff --git a/src/net/lookup_plan9.go b/src/net/lookup_plan9.go index c9bab29aded..836beb3b171 100644 --- a/src/net/lookup_plan9.go +++ b/src/net/lookup_plan9.go @@ -8,7 +8,7 @@ import ( "context" "errors" "internal/bytealg" - "internal/itoa" + "internal/strconv" "internal/stringslite" "io" "os" @@ -87,7 +87,7 @@ func queryCS1(ctx context.Context, net string, ip IP, port int) (clone, dest str if len(ip) != 0 && !ip.IsUnspecified() { ips = ip.String() } - lines, err := queryCS(ctx, net, ips, itoa.Itoa(port)) + lines, err := queryCS(ctx, net, ips, strconv.Itoa(port)) if err != nil { return } diff --git a/src/net/lookup_test.go b/src/net/lookup_test.go index 514cbd098ae..2a774100a8e 100644 --- a/src/net/lookup_test.go +++ b/src/net/lookup_test.go @@ -1420,8 +1420,8 @@ func testLookupNoData(t *testing.T, prefix string) { return } - var dnsErr *DNSError - if errors.As(err, &dnsErr) { + dnsErr, ok := errors.AsType[*DNSError](err) + if ok { succeeded := true if !dnsErr.IsNotFound { succeeded = false @@ -1455,8 +1455,7 @@ func testLookupNoData(t *testing.T, prefix string) { func TestLookupPortNotFound(t *testing.T) { allResolvers(t, func(t *testing.T) { _, err := LookupPort("udp", "_-unknown-service-") - var dnsErr *DNSError - if !errors.As(err, &dnsErr) || !dnsErr.IsNotFound { + if dnsErr, ok := errors.AsType[*DNSError](err); !ok || !dnsErr.IsNotFound { t.Fatalf("unexpected error: %v", err) } }) @@ -1475,8 +1474,7 @@ var tcpOnlyService = func() string { func TestLookupPortDifferentNetwork(t *testing.T) { allResolvers(t, func(t *testing.T) { _, err := LookupPort("udp", tcpOnlyService) - var dnsErr *DNSError - if !errors.As(err, &dnsErr) || !dnsErr.IsNotFound { + if dnsErr, ok := errors.AsType[*DNSError](err); !ok || !dnsErr.IsNotFound { t.Fatalf("unexpected error: %v", err) } }) diff --git a/src/net/mail/message.go b/src/net/mail/message.go index 14f839a0307..1502b359625 100644 --- a/src/net/mail/message.go +++ b/src/net/mail/message.go @@ -724,7 +724,8 @@ func (p *addrParser) consumeDomainLiteral() (string, error) { } // Parse the dtext - var dtext string + dtext := p.s + dtextLen := 0 for { if p.empty() { return "", errors.New("mail: unclosed domain-literal") @@ -741,9 +742,10 @@ func (p *addrParser) consumeDomainLiteral() (string, error) { return "", fmt.Errorf("mail: bad character in domain-literal: %q", r) } - dtext += p.s[:size] + dtextLen += size p.s = p.s[size:] } + dtext = dtext[:dtextLen] // Skip the trailing ] if !p.consume(']') { diff --git a/src/net/net_test.go b/src/net/net_test.go index 7269db8f2be..637c95540f4 100644 --- a/src/net/net_test.go +++ b/src/net/net_test.go @@ -24,7 +24,6 @@ func TestCloseRead(t *testing.T) { t.Parallel() for _, network := range []string{"tcp", "unix", "unixpacket"} { - network := network t.Run(network, func(t *testing.T) { if !testableNetwork(network) { t.Skipf("network %s is not testable on the current platform", network) @@ -83,7 +82,6 @@ func TestCloseWrite(t *testing.T) { } for _, network := range []string{"tcp", "unix", "unixpacket"} { - network := network t.Run(network, func(t *testing.T) { if !testableNetwork(network) { t.Skipf("network %s is not testable on the current platform", network) @@ -185,7 +183,6 @@ func TestCloseWrite(t *testing.T) { func TestConnClose(t *testing.T) { t.Parallel() for _, network := range []string{"tcp", "unix", "unixpacket"} { - network := network t.Run(network, func(t *testing.T) { if 
!testableNetwork(network) { t.Skipf("network %s is not testable on the current platform", network) @@ -227,7 +224,6 @@ func TestConnClose(t *testing.T) { func TestListenerClose(t *testing.T) { t.Parallel() for _, network := range []string{"tcp", "unix", "unixpacket"} { - network := network t.Run(network, func(t *testing.T) { if !testableNetwork(network) { t.Skipf("network %s is not testable on the current platform", network) @@ -265,7 +261,6 @@ func TestListenerClose(t *testing.T) { func TestPacketConnClose(t *testing.T) { t.Parallel() for _, network := range []string{"udp", "unixgram"} { - network := network t.Run(network, func(t *testing.T) { if !testableNetwork(network) { t.Skipf("network %s is not testable on the current platform", network) @@ -349,7 +344,6 @@ func TestAcceptIgnoreAbortedConnRequest(t *testing.T) { func TestZeroByteRead(t *testing.T) { t.Parallel() for _, network := range []string{"tcp", "unix", "unixpacket"} { - network := network t.Run(network, func(t *testing.T) { if !testableNetwork(network) { t.Skipf("network %s is not testable on the current platform", network) diff --git a/src/net/netip/netip.go b/src/net/netip/netip.go index b1b15b47287..10882db6a41 100644 --- a/src/net/netip/netip.go +++ b/src/net/netip/netip.go @@ -16,7 +16,6 @@ import ( "errors" "internal/bytealg" "internal/byteorder" - "internal/itoa" "math" "strconv" "unique" @@ -684,12 +683,12 @@ func (ip Addr) Prefix(b int) (Prefix, error) { return Prefix{}, nil case z4: if b > 32 { - return Prefix{}, errors.New("prefix length " + itoa.Itoa(b) + " too large for IPv4") + return Prefix{}, errors.New("prefix length " + strconv.Itoa(b) + " too large for IPv4") } effectiveBits += 96 default: if b > 128 { - return Prefix{}, errors.New("prefix length " + itoa.Itoa(b) + " too large for IPv6") + return Prefix{}, errors.New("prefix length " + strconv.Itoa(b) + " too large for IPv6") } } ip.addr = ip.addr.and(mask6(effectiveBits)) @@ -1593,5 +1592,5 @@ func (p Prefix) String() string { if !p.IsValid() { return "invalid Prefix" } - return p.ip.String() + "/" + itoa.Itoa(p.Bits()) + return p.ip.String() + "/" + strconv.Itoa(p.Bits()) } diff --git a/src/net/server_test.go b/src/net/server_test.go index eb6b111f1f5..cc9e9857099 100644 --- a/src/net/server_test.go +++ b/src/net/server_test.go @@ -250,7 +250,6 @@ var udpServerTests = []struct { func TestUDPServer(t *testing.T) { for i, tt := range udpServerTests { - i, tt := i, tt t.Run(fmt.Sprint(i), func(t *testing.T) { if !testableListenArgs(tt.snet, tt.saddr, tt.taddr) { t.Skipf("skipping %s %s<-%s test", tt.snet, tt.saddr, tt.taddr) @@ -340,7 +339,6 @@ func TestUnixgramServer(t *testing.T) { } for i, tt := range unixgramServerTests { - i, tt := i, tt t.Run(fmt.Sprint(i), func(t *testing.T) { if !testableListenArgs("unixgram", tt.saddr, "") { t.Skipf("skipping unixgram %s<-%s test", tt.saddr, tt.caddr) diff --git a/src/net/tcpsock.go b/src/net/tcpsock.go index 376bf238c70..35eda25ead0 100644 --- a/src/net/tcpsock.go +++ b/src/net/tcpsock.go @@ -6,7 +6,7 @@ package net import ( "context" - "internal/itoa" + "internal/strconv" "io" "net/netip" "os" @@ -47,9 +47,9 @@ func (a *TCPAddr) String() string { } ip := ipEmptyString(a.IP) if a.Zone != "" { - return JoinHostPort(ip+"%"+a.Zone, itoa.Itoa(a.Port)) + return JoinHostPort(ip+"%"+a.Zone, strconv.Itoa(a.Port)) } - return JoinHostPort(ip, itoa.Itoa(a.Port)) + return JoinHostPort(ip, strconv.Itoa(a.Port)) } func (a *TCPAddr) isWildcard() bool { diff --git a/src/net/tcpsockopt_plan9.go b/src/net/tcpsockopt_plan9.go index 
017e87518ae..4c7958b8bde 100644 --- a/src/net/tcpsockopt_plan9.go +++ b/src/net/tcpsockopt_plan9.go @@ -7,7 +7,7 @@ package net import ( - "internal/itoa" + "internal/strconv" "syscall" "time" ) @@ -22,7 +22,7 @@ func setKeepAliveIdle(fd *netFD, d time.Duration) error { return nil } - cmd := "keepalive " + itoa.Itoa(int(d/time.Millisecond)) + cmd := "keepalive " + strconv.Itoa(int(d/time.Millisecond)) _, e := fd.ctl.WriteAt([]byte(cmd), 0) return e } diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go index 668c06c24c1..6df3a630917 100644 --- a/src/net/textproto/reader.go +++ b/src/net/textproto/reader.go @@ -285,8 +285,10 @@ func (r *Reader) ReadCodeLine(expectCode int) (code int, message string, err err // // An expectCode <= 0 disables the check of the status code. func (r *Reader) ReadResponse(expectCode int) (code int, message string, err error) { - code, continued, message, err := r.readCodeLine(expectCode) + code, continued, first, err := r.readCodeLine(expectCode) multi := continued + var messageBuilder strings.Builder + messageBuilder.WriteString(first) for continued { line, err := r.ReadLine() if err != nil { @@ -297,12 +299,15 @@ func (r *Reader) ReadResponse(expectCode int) (code int, message string, err err var moreMessage string code2, continued, moreMessage, err = parseCodeLine(line, 0) if err != nil || code2 != code { - message += "\n" + strings.TrimRight(line, "\r\n") + messageBuilder.WriteByte('\n') + messageBuilder.WriteString(strings.TrimRight(line, "\r\n")) continued = true continue } - message += "\n" + moreMessage + messageBuilder.WriteByte('\n') + messageBuilder.WriteString(moreMessage) } + message = messageBuilder.String() if err != nil && multi && message != "" { // replace one line error message with all lines (full message) err = &Error{code, message} diff --git a/src/net/timeout_test.go b/src/net/timeout_test.go index 0d009f69993..b7f8c613b4b 100644 --- a/src/net/timeout_test.go +++ b/src/net/timeout_test.go @@ -180,7 +180,6 @@ func TestAcceptTimeout(t *testing.T) { } for _, timeout := range timeouts { - timeout := timeout t.Run(fmt.Sprintf("%v", timeout), func(t *testing.T) { t.Parallel() diff --git a/src/net/udpsock.go b/src/net/udpsock.go index f9a3bee867d..fcd6a065688 100644 --- a/src/net/udpsock.go +++ b/src/net/udpsock.go @@ -6,7 +6,7 @@ package net import ( "context" - "internal/itoa" + "internal/strconv" "net/netip" "syscall" ) @@ -47,9 +47,9 @@ func (a *UDPAddr) String() string { } ip := ipEmptyString(a.IP) if a.Zone != "" { - return JoinHostPort(ip+"%"+a.Zone, itoa.Itoa(a.Port)) + return JoinHostPort(ip+"%"+a.Zone, strconv.Itoa(a.Port)) } - return JoinHostPort(ip, itoa.Itoa(a.Port)) + return JoinHostPort(ip, strconv.Itoa(a.Port)) } func (a *UDPAddr) isWildcard() bool { diff --git a/src/net/unixsock_test.go b/src/net/unixsock_test.go index 6758afddcaa..f6c5679f429 100644 --- a/src/net/unixsock_test.go +++ b/src/net/unixsock_test.go @@ -247,7 +247,6 @@ func TestUnixConnLocalAndRemoteNames(t *testing.T) { handler := func(ls *localServer, ln Listener) {} for _, laddr := range []string{"", testUnixAddr(t)} { - laddr := laddr taddr := testUnixAddr(t) ta, err := ResolveUnixAddr("unix", taddr) if err != nil { @@ -306,7 +305,6 @@ func TestUnixgramConnLocalAndRemoteNames(t *testing.T) { } for _, laddr := range []string{"", testUnixAddr(t)} { - laddr := laddr taddr := testUnixAddr(t) ta, err := ResolveUnixAddr("unixgram", taddr) if err != nil { diff --git a/src/net/url/encoding_table.go b/src/net/url/encoding_table.go new file mode 100644 
index 00000000000..60b3564948e --- /dev/null +++ b/src/net/url/encoding_table.go @@ -0,0 +1,114 @@ +// Code generated from gen_encoding_table.go using 'go generate'; DO NOT EDIT. + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package url + +type encoding uint8 + +const ( + encodePath encoding = 1 << iota + encodePathSegment + encodeHost + encodeZone + encodeUserPassword + encodeQueryComponent + encodeFragment + + // hexChar is actually NOT an encoding mode, but there are only seven + // encoding modes. We might as well abuse the otherwise unused most + // significant bit in uint8 to indicate whether a character is + // hexadecimal. + hexChar +) + +var table = [256]encoding{ + '!': encodeFragment | encodeZone | encodeHost, + '"': encodeZone | encodeHost, + '$': encodeFragment | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '&': encodeFragment | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '\'': encodeZone | encodeHost, + '(': encodeFragment | encodeZone | encodeHost, + ')': encodeFragment | encodeZone | encodeHost, + '*': encodeFragment | encodeZone | encodeHost, + '+': encodeFragment | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + ',': encodeFragment | encodeUserPassword | encodeZone | encodeHost | encodePath, + '-': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '.': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '/': encodeFragment | encodePath, + '0': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '1': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '2': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '3': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '4': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '5': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '6': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '7': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '8': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '9': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + ':': encodeFragment | encodeZone | encodeHost | encodePathSegment | encodePath, + ';': encodeFragment | encodeUserPassword | encodeZone | encodeHost | encodePath, + '<': encodeZone | encodeHost, + '=': encodeFragment | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '>': encodeZone | encodeHost, + '?': encodeFragment, + '@': encodeFragment | encodePathSegment | encodePath, + 'A': hexChar | encodeFragment | 
encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'B': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'C': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'D': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'E': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'F': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'G': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'H': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'I': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'J': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'K': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'L': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'M': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'N': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'O': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'P': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'Q': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'R': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'S': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'T': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'U': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'V': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'W': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'X': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'Y': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'Z': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '[': encodeZone | encodeHost, + ']': encodeZone | encodeHost, + '_': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'a': hexChar | encodeFragment | encodeQueryComponent | 
encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'b': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'c': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'd': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'e': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'f': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'g': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'h': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'i': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'j': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'k': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'l': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'm': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'n': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'o': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'p': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'q': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'r': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 's': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 't': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'u': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'v': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'w': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'x': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'y': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'z': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '~': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, +} diff --git a/src/net/url/gen_encoding_table.go b/src/net/url/gen_encoding_table.go new file mode 100644 index 00000000000..5defe5046bb --- 
/dev/null +++ b/src/net/url/gen_encoding_table.go @@ -0,0 +1,234 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +package main + +import ( + "bytes" + _ "embed" + "fmt" + "go/format" + "io" + "log" + "maps" + "os" + "slices" + "strconv" + "strings" +) + +// We embed this source file in the resulting code-generation program in order +// to extract the definitions of the encoding type and constants from it and +// include them in the generated file. +// +//go:embed gen_encoding_table.go +var genSource string + +const filename = "encoding_table.go" + +func main() { + var out bytes.Buffer + fmt.Fprintln(&out, "// Code generated from gen_encoding_table.go using 'go generate'; DO NOT EDIT.") + fmt.Fprintln(&out) + fmt.Fprintln(&out, "// Copyright 2025 The Go Authors. All rights reserved.") + fmt.Fprintln(&out, "// Use of this source code is governed by a BSD-style") + fmt.Fprintln(&out, "// license that can be found in the LICENSE file.") + fmt.Fprintln(&out) + fmt.Fprintln(&out, "package url") + fmt.Fprintln(&out) + generateEnc(&out, genSource) + generateTable(&out) + + formatted, err := format.Source(out.Bytes()) + if err != nil { + log.Fatal("format:", err) + } + + err = os.WriteFile(filename, formatted, 0644) + if err != nil { + log.Fatal("WriteFile:", err) + } +} + +func generateEnc(w io.Writer, src string) { + var writeLine bool + for line := range strings.Lines(src) { + if strings.HasPrefix(line, "// START encoding") { + writeLine = true + continue + } + if strings.HasPrefix(line, "// END encoding") { + return + } + if writeLine { + fmt.Fprint(w, line) + } + } +} + +func generateTable(w io.Writer) { + fmt.Fprintln(w, "var table = [256]encoding{") + + // Sort the encodings (in decreasing order) to guarantee a stable output. + sortedEncs := slices.Sorted(maps.Keys(encNames)) + slices.Reverse(sortedEncs) + + for i := range 256 { + c := byte(i) + var lineBuf bytes.Buffer + + // Write key to line buffer. + lineBuf.WriteString(strconv.QuoteRune(rune(c))) + + lineBuf.WriteByte(':') + + // Write value to line buffer. + blankVal := true + if ishex(c) { + // Set the hexChar bit if this char is hexadecimal. + lineBuf.WriteString("hexChar") + blankVal = false + } + for _, enc := range sortedEncs { + if !shouldEscape(c, enc) { + if !blankVal { + lineBuf.WriteByte('|') + } + // Set this encoding mode's bit if this char should NOT be + // escaped. + name := encNames[enc] + lineBuf.WriteString(name) + blankVal = false + } + } + + if !blankVal { + lineBuf.WriteString(",\n") + w.Write(lineBuf.Bytes()) + } + } + fmt.Fprintln(w, "}") +} + +// START encoding (keep this marker comment in sync with genEnc) +type encoding uint8 + +const ( + encodePath encoding = 1 << iota + encodePathSegment + encodeHost + encodeZone + encodeUserPassword + encodeQueryComponent + encodeFragment + + // hexChar is actually NOT an encoding mode, but there are only seven + // encoding modes. We might as well abuse the otherwise unused most + // significant bit in uint8 to indicate whether a character is + // hexadecimal. + hexChar +) + +// END encoding (keep this marker comment in sync with genEnc) + +// Keep this in sync with the definitions of encoding mode constants. 
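+// hexChar is deliberately absent below: it is not an encoding mode, and
+// generateTable handles it separately via ishex.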
+var encNames = map[encoding]string{ + encodePath: "encodePath", + encodePathSegment: "encodePathSegment", + encodeHost: "encodeHost", + encodeZone: "encodeZone", + encodeUserPassword: "encodeUserPassword", + encodeQueryComponent: "encodeQueryComponent", + encodeFragment: "encodeFragment", +} + +// Return true if the specified character should be escaped when +// appearing in a URL string, according to RFC 3986. +// +// Please be informed that for now shouldEscape does not check all +// reserved characters correctly. See golang.org/issue/5684. +func shouldEscape(c byte, mode encoding) bool { + // §2.3 Unreserved characters (alphanum) + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + return false + } + + if mode == encodeHost || mode == encodeZone { + // §3.2.2 Host allows + // sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "=" + // as part of reg-name. + // We add : because we include :port as part of host. + // We add [ ] because we include [ipv6]:port as part of host. + // We add < > because they're the only characters left that + // we could possibly allow, and Parse will reject them if we + // escape them (because hosts can't use %-encoding for + // ASCII bytes). + switch c { + case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=', ':', '[', ']', '<', '>', '"': + return false + } + } + + switch c { + case '-', '_', '.', '~': // §2.3 Unreserved characters (mark) + return false + + case '$', '&', '+', ',', '/', ':', ';', '=', '?', '@': // §2.2 Reserved characters (reserved) + // Different sections of the URL allow a few of + // the reserved characters to appear unescaped. + switch mode { + case encodePath: // §3.3 + // The RFC allows : @ & = + $ but saves / ; , for assigning + // meaning to individual path segments. This package + // only manipulates the path as a whole, so we allow those + // last three as well. That leaves only ? to escape. + return c == '?' + + case encodePathSegment: // §3.3 + // The RFC allows : @ & = + $ but saves / ; , for assigning + // meaning to individual path segments. + return c == '/' || c == ';' || c == ',' || c == '?' + + case encodeUserPassword: // §3.2.1 + // The RFC allows ';', ':', '&', '=', '+', '$', and ',' in + // userinfo, so we must escape only '@', '/', and '?'. + // The parsing of userinfo treats ':' as special so we must escape + // that too. + return c == '@' || c == '/' || c == '?' || c == ':' + + case encodeQueryComponent: // §3.4 + // The RFC reserves (so we must escape) everything. + return true + + case encodeFragment: // §4.1 + // The RFC text is silent but the grammar allows + // everything, so escape nothing. + return false + } + } + + if mode == encodeFragment { + // RFC 3986 §2.2 allows not escaping sub-delims. A subset of sub-delims are + // included in reserved from RFC 2396 §2.2. The remaining sub-delims do not + // need to be escaped. To minimize potential breakage, we apply two restrictions: + // (1) we always escape sub-delims outside of the fragment, and (2) we always + // escape single quote to avoid breaking callers that had previously assumed that + // single quotes would be escaped. See issue #19917. + switch c { + case '!', '(', ')', '*': + return false + } + } + + // Everything else must be escaped. 
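+ // (In the generated table, "must escape" corresponds to the mode's bit
+ // being left unset for c, so url.shouldEscape reduces to table[c]&mode == 0.)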
+ return true +} + +func ishex(c byte) bool { + return '0' <= c && c <= '9' || + 'a' <= c && c <= 'f' || + 'A' <= c && c <= 'F' +} diff --git a/src/net/url/url.go b/src/net/url/url.go index 2a576594603..ca5ff9e3d70 100644 --- a/src/net/url/url.go +++ b/src/net/url/url.go @@ -7,6 +7,9 @@ // See RFC 3986. This package generally follows RFC 3986, except where // it deviates for compatibility reasons. // RFC 6874 followed for IPv6 zone literals. + +//go:generate go run gen_encoding_table.go + package url // When sending changes, first search old issues for history on decisions. @@ -15,7 +18,8 @@ package url import ( "errors" "fmt" - "maps" + "internal/godebug" + "net/netip" "path" "slices" "strconv" @@ -23,6 +27,8 @@ import ( _ "unsafe" // for linkname ) +var urlstrictcolons = godebug.New("urlstrictcolons") + // Error reports an error and the operation and URL that caused it. type Error struct { Op string @@ -50,42 +56,14 @@ func (e *Error) Temporary() bool { const upperhex = "0123456789ABCDEF" func ishex(c byte) bool { - switch { - case '0' <= c && c <= '9': - return true - case 'a' <= c && c <= 'f': - return true - case 'A' <= c && c <= 'F': - return true - } - return false + return table[c]&hexChar != 0 } +// Precondition: ishex(c) is true. func unhex(c byte) byte { - switch { - case '0' <= c && c <= '9': - return c - '0' - case 'a' <= c && c <= 'f': - return c - 'a' + 10 - case 'A' <= c && c <= 'F': - return c - 'A' + 10 - default: - panic("invalid hex character") - } + return 9*(c>>6) + (c & 15) } -type encoding int - -const ( - encodePath encoding = 1 + iota - encodePathSegment - encodeHost - encodeZone - encodeUserPassword - encodeQueryComponent - encodeFragment -) - type EscapeError string func (e EscapeError) Error() string { @@ -98,86 +76,9 @@ func (e InvalidHostError) Error() string { return "invalid character " + strconv.Quote(string(e)) + " in host name" } -// Return true if the specified character should be escaped when -// appearing in a URL string, according to RFC 3986. -// -// Please be informed that for now shouldEscape does not check all -// reserved characters correctly. See golang.org/issue/5684. +// See the reference implementation in gen_encoding_table.go. func shouldEscape(c byte, mode encoding) bool { - // §2.3 Unreserved characters (alphanum) - if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { - return false - } - - if mode == encodeHost || mode == encodeZone { - // §3.2.2 Host allows - // sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "=" - // as part of reg-name. - // We add : because we include :port as part of host. - // We add [ ] because we include [ipv6]:port as part of host. - // We add < > because they're the only characters left that - // we could possibly allow, and Parse will reject them if we - // escape them (because hosts can't use %-encoding for - // ASCII bytes). - switch c { - case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=', ':', '[', ']', '<', '>', '"': - return false - } - } - - switch c { - case '-', '_', '.', '~': // §2.3 Unreserved characters (mark) - return false - - case '$', '&', '+', ',', '/', ':', ';', '=', '?', '@': // §2.2 Reserved characters (reserved) - // Different sections of the URL allow a few of - // the reserved characters to appear unescaped. - switch mode { - case encodePath: // §3.3 - // The RFC allows : @ & = + $ but saves / ; , for assigning - // meaning to individual path segments. 
This package - // only manipulates the path as a whole, so we allow those - // last three as well. That leaves only ? to escape. - return c == '?' - - case encodePathSegment: // §3.3 - // The RFC allows : @ & = + $ but saves / ; , for assigning - // meaning to individual path segments. - return c == '/' || c == ';' || c == ',' || c == '?' - - case encodeUserPassword: // §3.2.1 - // The RFC allows ';', ':', '&', '=', '+', '$', and ',' in - // userinfo, so we must escape only '@', '/', and '?'. - // The parsing of userinfo treats ':' as special so we must escape - // that too. - return c == '@' || c == '/' || c == '?' || c == ':' - - case encodeQueryComponent: // §3.4 - // The RFC reserves (so we must escape) everything. - return true - - case encodeFragment: // §4.1 - // The RFC text is silent but the grammar allows - // everything, so escape nothing. - return false - } - - if mode == encodeFragment { - // RFC 3986 §2.2 allows not escaping sub-delims. A subset of sub-delims are - // included in reserved from RFC 2396 §2.2. The remaining sub-delims do not - // need to be escaped. To minimize potential breakage, we apply two restrictions: - // (1) we always escape sub-delims outside of the fragment, and (2) we always - // escape single quote to avoid breaking callers that had previously assumed that - // single quotes would be escaped. See issue #19917. - switch c { - case '!', '(', ')', '*': - return false - } - } - - // Everything else must be escaped. - return true + return table[c]&mode == 0 } // QueryUnescape does the inverse transformation of [QueryEscape], @@ -255,19 +156,24 @@ func unescape(s string, mode encoding) (string, error) { return s, nil } + var unescapedPlusSign byte + switch mode { + case encodeQueryComponent: + unescapedPlusSign = ' ' + default: + unescapedPlusSign = '+' + } var t strings.Builder t.Grow(len(s) - 2*n) for i := 0; i < len(s); i++ { switch s[i] { case '%': + // In the loop above, we established that unhex's precondition is + // fulfilled for both s[i+1] and s[i+2]. t.WriteByte(unhex(s[i+1])<<4 | unhex(s[i+2])) i += 2 case '+': - if mode == encodeQueryComponent { - t.WriteByte(' ') - } else { - t.WriteByte('+') - } + t.WriteByte(unescapedPlusSign) default: t.WriteByte(s[i]) } @@ -289,8 +195,7 @@ func PathEscape(s string) string { func escape(s string, mode encoding) string { spaceCount, hexCount := 0, 0 - for i := 0; i < len(s); i++ { - c := s[i] + for _, c := range []byte(s) { if shouldEscape(c, mode) { if c == ' ' && mode == encodeQueryComponent { spaceCount++ @@ -325,8 +230,8 @@ func escape(s string, mode encoding) string { } j := 0 - for i := 0; i < len(s); i++ { - switch c := s[i]; { + for _, c := range []byte(s) { + switch { case c == ' ' && mode == encodeQueryComponent: t[j] = '+' j++ @@ -336,7 +241,7 @@ func escape(s string, mode encoding) string { t[j+2] = upperhex[c&15] j += 3 default: - t[j] = s[i] + t[j] = c j++ } } @@ -364,25 +269,41 @@ func escape(s string, mode encoding) string { // A consequence is that it is impossible to tell which slashes in the Path were // slashes in the raw URL and which were %2f. This distinction is rarely important, // but when it is, the code should use the [URL.EscapedPath] method, which preserves -// the original encoding of Path. +// the original encoding of Path. The Fragment field is also stored in decoded form; +// use [URL.EscapedFragment] to retrieve the original encoding. 
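+// For example, parsing "https://example.com/a%2Fb" stores Path "/a/b",
+// while EscapedPath (and hence String) reproduces "/a%2Fb".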
// -// The RawPath field is an optional field which is only set when the default -// encoding of Path is different from the escaped path. See the EscapedPath method -// for more details. -// -// URL's String method uses the EscapedPath method to obtain the path. +// The [URL.String] method uses the [URL.EscapedPath] method to obtain the path. type URL struct { - Scheme string - Opaque string // encoded opaque data - User *Userinfo // username and password information - Host string // host or host:port (see Hostname and Port methods) - Path string // path (relative paths may omit leading slash) - RawPath string // encoded path hint (see EscapedPath method) - OmitHost bool // do not emit empty host (authority) - ForceQuery bool // append a query ('?') even if RawQuery is empty - RawQuery string // encoded query values, without '?' - Fragment string // fragment for references, without '#' - RawFragment string // encoded fragment hint (see EscapedFragment method) + Scheme string + Opaque string // encoded opaque data + User *Userinfo // username and password information + Host string // "host" or "host:port" (see Hostname and Port methods) + Path string // path (relative paths may omit leading slash) + Fragment string // fragment for references (without '#') + + // RawQuery contains the encoded query values, without the initial '?'. + // Use URL.Query to decode the query. + RawQuery string + + // RawPath is an optional field containing an encoded path hint. + // See the EscapedPath method for more details. + // + // In general, code should call EscapedPath instead of reading RawPath. + RawPath string + + // RawFragment is an optional field containing an encoded fragment hint. + // See the EscapedFragment method for more details. + // + // In general, code should call EscapedFragment instead of reading RawFragment. + RawFragment string + + // ForceQuery indicates whether the original URL contained a query ('?') character. + // When set, the String method will include a trailing '?', even when RawQuery is empty. + ForceQuery bool + + // OmitHost indicates the URL has an empty host (authority). + // When set, the String method will not include the host when it is empty. + OmitHost bool } // User returns a [Userinfo] containing the provided username @@ -626,41 +547,66 @@ func parseAuthority(authority string) (user *Userinfo, host string, err error) { // parseHost parses host as an authority without user // information. That is, as host[:port]. func parseHost(host string) (string, error) { - if strings.HasPrefix(host, "[") { + if openBracketIdx := strings.LastIndex(host, "["); openBracketIdx != -1 { // Parse an IP-Literal in RFC 3986 and RFC 6874. // E.g., "[fe80::1]", "[fe80::1%25en0]", "[fe80::1]:80". - i := strings.LastIndex(host, "]") - if i < 0 { + closeBracketIdx := strings.LastIndex(host, "]") + if closeBracketIdx < 0 { return "", errors.New("missing ']' in host") } - colonPort := host[i+1:] + + colonPort := host[closeBracketIdx+1:] if !validOptionalPort(colonPort) { return "", fmt.Errorf("invalid port %q after host", colonPort) } + unescapedColonPort, err := unescape(colonPort, encodeHost) + if err != nil { + return "", err + } + hostname := host[openBracketIdx+1 : closeBracketIdx] + var unescapedHostname string // RFC 6874 defines that %25 (%-encoded percent) introduces // the zone identifier, and the zone identifier can use basically // any %-encoding it likes. That's different from the host, which // can only %-encode non-ASCII bytes. 
// We do impose some restrictions on the zone, to avoid stupidity // like newlines. - zone := strings.Index(host[:i], "%25") - if zone >= 0 { - host1, err := unescape(host[:zone], encodeHost) + zoneIdx := strings.Index(hostname, "%25") + if zoneIdx >= 0 { + hostPart, err := unescape(hostname[:zoneIdx], encodeHost) if err != nil { return "", err } - host2, err := unescape(host[zone:i], encodeZone) + zonePart, err := unescape(hostname[zoneIdx:], encodeZone) if err != nil { return "", err } - host3, err := unescape(host[i:], encodeHost) + unescapedHostname = hostPart + zonePart + } else { + var err error + unescapedHostname, err = unescape(hostname, encodeHost) if err != nil { return "", err } - return host1 + host2 + host3, nil } - } else if i := strings.LastIndex(host, ":"); i != -1 { + + // Per RFC 3986, only a host identified by a valid + // IPv6 address can be enclosed by square brackets. + // This excludes any IPv4, but notably not IPv4-mapped addresses. + addr, err := netip.ParseAddr(unescapedHostname) + if err != nil { + return "", fmt.Errorf("invalid host: %w", err) + } + if addr.Is4() { + return "", errors.New("invalid IP-literal") + } + return "[" + unescapedHostname + "]" + unescapedColonPort, nil + } else if i := strings.Index(host, ":"); i != -1 { + if j := strings.LastIndex(host, ":"); urlstrictcolons.Value() == "0" && j != i { + urlstrictcolons.IncNonDefault() + i = j + } colonPort := host[i:] if !validOptionalPort(colonPort) { return "", fmt.Errorf("invalid port %q after host", colonPort) @@ -1008,7 +954,16 @@ func (v Values) Encode() string { return "" } var buf strings.Builder - for _, k := range slices.Sorted(maps.Keys(v)) { + // To minimize allocations, we eschew iterators and pre-size the slice in + // which we collect v's keys. + keys := make([]string, len(v)) + var i int + for k := range v { + keys[i] = k + i++ + } + slices.Sort(keys) + for _, k := range keys { vs := v[k] keyEscaped := QueryEscape(k) for _, v := range vs { diff --git a/src/net/url/url_test.go b/src/net/url/url_test.go index 16e08b63c6d..b601849ce55 100644 --- a/src/net/url/url_test.go +++ b/src/net/url/url_test.go @@ -13,6 +13,7 @@ import ( "io" "net" "reflect" + "strconv" "strings" "testing" ) @@ -383,6 +384,16 @@ var urltests = []URLTest{ }, "", }, + // valid IPv6 host with port and path + { + "https://[2001:db8::1]:8443/test/path", + &URL{ + Scheme: "https", + Host: "[2001:db8::1]:8443", + Path: "/test/path", + }, + "", + }, // host subcomponent; IPv6 address with zone identifier in RFC 6874 { "http://[fe80::1%25en0]/", // alphanum zone identifier @@ -496,26 +507,6 @@ var urltests = []URLTest{ }, "", }, - { - // Malformed IPv6 but still accepted. - "http://2b01:e34:ef40:7730:8e70:5aff:fefe:edac:8080/foo", - &URL{ - Scheme: "http", - Host: "2b01:e34:ef40:7730:8e70:5aff:fefe:edac:8080", - Path: "/foo", - }, - "", - }, - { - // Malformed IPv6 but still accepted. - "http://2b01:e34:ef40:7730:8e70:5aff:fefe:edac:/foo", - &URL{ - Scheme: "http", - Host: "2b01:e34:ef40:7730:8e70:5aff:fefe:edac:", - Path: "/foo", - }, - "", - }, { "http://[2b01:e34:ef40:7730:8e70:5aff:fefe:edac]:8080/foo", &URL{ @@ -707,6 +698,27 @@ var parseRequestURLTests = []struct { // RFC 6874. 
{"http://[fe80::1%en0]/", false}, {"http://[fe80::1%en0]:8080/", false}, + + // Tests exercising RFC 3986 compliance + {"https://[1:2:3:4:5:6:7:8]", true}, // full IPv6 address + {"https://[2001:db8::a:b:c:d]", true}, // compressed IPv6 address + {"https://[fe80::1%25eth0]", true}, // link-local address with zone ID (interface name) + {"https://[fe80::abc:def%254]", true}, // link-local address with zone ID (interface index) + {"https://[2001:db8::1]/path", true}, // compressed IPv6 address with path + {"https://[fe80::1%25eth0]/path?query=1", true}, // link-local with zone, path, and query + + {"https://[::ffff:192.0.2.1]", true}, + {"https://[:1] ", false}, + {"https://[1:2:3:4:5:6:7:8:9]", false}, + {"https://[1::1::1]", false}, + {"https://[1:2:3:]", false}, + {"https://[ffff::127.0.0.4000]", false}, + {"https://[0:0::test.com]:80", false}, + {"https://[2001:db8::test.com]", false}, + {"https://[test.com]", false}, + {"https://1:2:3:4:5:6:7:8", false}, + {"https://1:2:3:4:5:6:7:8:80", false}, + {"https://example.com:80:", false}, } func TestParseRequestURI(t *testing.T) { @@ -1080,6 +1092,17 @@ var encodeQueryTests = []EncodeQueryTest{ "b": {"b1", "b2", "b3"}, "c": {"c1", "c2", "c3"}, }, "a=a1&a=a2&a=a3&b=b1&b=b2&b=b3&c=c1&c=c2&c=c3"}, + {Values{ + "a": {"a"}, + "b": {"b"}, + "c": {"c"}, + "d": {"d"}, + "e": {"e"}, + "f": {"f"}, + "g": {"g"}, + "h": {"h"}, + "i": {"i"}, + }, "a=a&b=b&c=c&d=d&e=e&f=f&g=g&h=h&i=i"}, } func TestEncodeQuery(t *testing.T) { @@ -1090,6 +1113,17 @@ func TestEncodeQuery(t *testing.T) { } } +func BenchmarkEncodeQuery(b *testing.B) { + for _, tt := range encodeQueryTests { + b.Run(tt.expected, func(b *testing.B) { + b.ReportAllocs() + for b.Loop() { + tt.m.Encode() + } + }) + } +} + var resolvePathTests = []struct { base, ref, expected string }{ @@ -1643,6 +1677,18 @@ func TestParseErrors(t *testing.T) { {"cache_object:foo", true}, {"cache_object:foo/bar", true}, {"cache_object/:foo/bar", false}, + + {"http://[192.168.0.1]/", true}, // IPv4 in brackets + {"http://[192.168.0.1]:8080/", true}, // IPv4 in brackets with port + {"http://[::ffff:192.168.0.1]/", false}, // IPv4-mapped IPv6 in brackets + {"http://[::ffff:192.168.0.1000]/", true}, // Out of range IPv4-mapped IPv6 in brackets + {"http://[::ffff:192.168.0.1]:8080/", false}, // IPv4-mapped IPv6 in brackets with port + {"http://[::ffff:c0a8:1]/", false}, // IPv4-mapped IPv6 in brackets (hex) + {"http://[not-an-ip]/", true}, // invalid IP string in brackets + {"http://[fe80::1%foo]/", true}, // invalid zone format in brackets + {"http://[fe80::1", true}, // missing closing bracket + {"http://fe80::1]/", true}, // missing opening bracket + {"http://[test.com]/", true}, // domain name in brackets } for _, tt := range tests { u, err := Parse(tt.in) @@ -2218,3 +2264,25 @@ func TestJoinPath(t *testing.T) { } } } + +func TestParseStrictIpv6(t *testing.T) { + t.Setenv("GODEBUG", "urlstrictcolons=0") + + tests := []struct { + url string + }{ + // Malformed URLs that used to parse. 
+ {"https://1:2:3:4:5:6:7:8"}, + {"https://1:2:3:4:5:6:7:8:80"}, + {"https://example.com:80:"}, + } + for i, tc := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + _, err := Parse(tc.url) + if err != nil { + t.Errorf("Parse(%q) error = %v, want nil", tc.url, err) + } + }) + } + +} diff --git a/src/os/exec/exec.go b/src/os/exec/exec.go index 38354a52443..e84ebfc453d 100644 --- a/src/os/exec/exec.go +++ b/src/os/exec/exec.go @@ -142,8 +142,8 @@ func (w wrappedError) Unwrap() error { // Cmd represents an external command being prepared or run. // -// A Cmd cannot be reused after calling its [Cmd.Run], [Cmd.Output] or [Cmd.CombinedOutput] -// methods. +// A Cmd cannot be reused after calling its [Cmd.Start], [Cmd.Run], +// [Cmd.Output], or [Cmd.CombinedOutput] methods. type Cmd struct { // Path is the path of the command to run. // diff --git a/src/os/exec/exec_posix_test.go b/src/os/exec/exec_posix_test.go index 77c5fc11e48..0711fac90e7 100644 --- a/src/os/exec/exec_posix_test.go +++ b/src/os/exec/exec_posix_test.go @@ -165,7 +165,6 @@ func TestImplicitPWD(t *testing.T) { } for _, tc := range cases { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -242,7 +241,6 @@ func TestExplicitPWD(t *testing.T) { // contain symlinks preserved from the PWD value in the test's environment. } for _, tc := range cases { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/src/os/exec/exec_test.go b/src/os/exec/exec_test.go index 3bded3dea60..1decebdc222 100644 --- a/src/os/exec/exec_test.go +++ b/src/os/exec/exec_test.go @@ -1378,8 +1378,8 @@ func TestWaitInterrupt(t *testing.T) { // The child process should be reported as failed, // and the grandchild will exit (or die by SIGPIPE) once the // stderr pipe is closed. - if ee := new(*exec.ExitError); !errors.As(err, ee) { - t.Errorf("Wait error = %v; want %T", err, *ee) + if ee, ok := errors.AsType[*exec.ExitError](err); !ok { + t.Errorf("Wait error = %v; want %T", err, ee) } }) @@ -1423,8 +1423,8 @@ func TestWaitInterrupt(t *testing.T) { // This command ignores SIGINT, sleeping until it is killed. // Wait should return the usual error for a killed process. 
- if ee := new(*exec.ExitError); !errors.As(err, ee) { - t.Errorf("Wait error = %v; want %T", err, *ee) + if ee, ok := errors.AsType[*exec.ExitError](err); !ok { + t.Errorf("Wait error = %v; want %T", err, ee) } }) @@ -1471,7 +1471,7 @@ func TestWaitInterrupt(t *testing.T) { t.Logf("stderr:\n%s", cmd.Stderr) t.Logf("[%d] %v", cmd.Process.Pid, err) - if ee := new(*exec.ExitError); !errors.As(err, ee) { + if _, ok := errors.AsType[*exec.ExitError](err); !ok { t.Errorf("Wait error = %v; want %v", err, ctx.Err()) } diff --git a/src/os/exec_plan9.go b/src/os/exec_plan9.go index a3d363b3440..7d50ab26287 100644 --- a/src/os/exec_plan9.go +++ b/src/os/exec_plan9.go @@ -5,7 +5,7 @@ package os import ( - "internal/itoa" + "internal/strconv" "syscall" "time" ) @@ -40,7 +40,7 @@ func startProcess(name string, argv []string, attr *ProcAttr) (p *Process, err e } func (p *Process) writeProcFile(file string, data string) error { - f, e := OpenFile("/proc/"+itoa.Itoa(p.Pid)+"/"+file, O_WRONLY, 0) + f, e := OpenFile("/proc/"+strconv.Itoa(p.Pid)+"/"+file, O_WRONLY, 0) if e != nil { return e } diff --git a/src/os/exec_posix.go b/src/os/exec_posix.go index 6b6977ab785..9f83db1555a 100644 --- a/src/os/exec_posix.go +++ b/src/os/exec_posix.go @@ -7,7 +7,7 @@ package os import ( - "internal/itoa" + "internal/strconv" "internal/syscall/execenv" "runtime" "syscall" @@ -132,16 +132,16 @@ func (p *ProcessState) String() string { case status.Exited(): code := status.ExitStatus() if runtime.GOOS == "windows" && uint(code) >= 1<<16 { // windows uses large hex numbers - res = "exit status " + itoa.Uitox(uint(code)) + res = "exit status 0x" + strconv.FormatUint(uint64(code), 16) } else { // unix systems use small decimal integers - res = "exit status " + itoa.Itoa(code) // unix + res = "exit status " + strconv.Itoa(code) // unix } case status.Signaled(): res = "signal: " + status.Signal().String() case status.Stopped(): res = "stop signal: " + status.StopSignal().String() if status.StopSignal() == syscall.SIGTRAP && status.TrapCause() != 0 { - res += " (trap " + itoa.Itoa(status.TrapCause()) + ")" + res += " (trap " + strconv.Itoa(status.TrapCause()) + ")" } case status.Continued(): res = "continued" diff --git a/src/os/executable_plan9.go b/src/os/executable_plan9.go index 8d8c83260f5..fcb269665bc 100644 --- a/src/os/executable_plan9.go +++ b/src/os/executable_plan9.go @@ -7,12 +7,12 @@ package os import ( - "internal/itoa" + "internal/strconv" "syscall" ) func executable() (string, error) { - fn := "/proc/" + itoa.Itoa(Getpid()) + "/text" + fn := "/proc/" + strconv.Itoa(Getpid()) + "/text" f, err := Open(fn) if err != nil { return "", err diff --git a/src/os/os_test.go b/src/os/os_test.go index 9f6eb13e1f9..536734901ba 100644 --- a/src/os/os_test.go +++ b/src/os/os_test.go @@ -840,8 +840,7 @@ func TestReaddirOfFile(t *testing.T) { if err == nil { t.Error("Readdirnames succeeded; want non-nil error") } - var pe *PathError - if !errors.As(err, &pe) || pe.Path != f.Name() { + if pe, ok := errors.AsType[*PathError](err); !ok || pe.Path != f.Name() { t.Errorf("Readdirnames returned %q; want a PathError with path %q", err, f.Name()) } if len(names) > 0 { diff --git a/src/os/os_windows_test.go b/src/os/os_windows_test.go index cd2413d26d4..3e7bddc7911 100644 --- a/src/os/os_windows_test.go +++ b/src/os/os_windows_test.go @@ -2275,3 +2275,16 @@ func TestOpenFileFlagInvalid(t *testing.T) { } f.Close() } + +func TestOpenFileTruncateNamedPipe(t *testing.T) { + t.Parallel() + name := pipeName() + pipe := newBytePipe(t, name, false) 
+ defer pipe.Close() + + f, err := os.OpenFile(name, os.O_TRUNC|os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + t.Fatal(err) + } + f.Close() +} diff --git a/src/os/path_windows_test.go b/src/os/path_windows_test.go index 3fa02e2a65b..eea2b58ee0a 100644 --- a/src/os/path_windows_test.go +++ b/src/os/path_windows_test.go @@ -236,6 +236,23 @@ func TestRemoveAllLongPathRelative(t *testing.T) { } } +func TestRemoveAllFallback(t *testing.T) { + windows.TestDeleteatFallback = true + t.Cleanup(func() { windows.TestDeleteatFallback = false }) + + dir := t.TempDir() + if err := os.WriteFile(filepath.Join(dir, "file1"), []byte{}, 0700); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(dir, "file2"), []byte{}, 0400); err != nil { // read-only file + t.Fatal(err) + } + + if err := os.RemoveAll(dir); err != nil { + t.Fatal(err) + } +} + func testLongPathAbs(t *testing.T, target string) { t.Helper() testWalkFn := func(path string, info os.FileInfo, err error) error { diff --git a/src/os/root_windows_test.go b/src/os/root_windows_test.go index 8ae6f0c9d34..47643f98d10 100644 --- a/src/os/root_windows_test.go +++ b/src/os/root_windows_test.go @@ -228,3 +228,22 @@ func TestRootSymlinkToDirectory(t *testing.T) { }) } } + +func TestRootOpenFileTruncateNamedPipe(t *testing.T) { + t.Parallel() + name := pipeName() + pipe := newBytePipe(t, name, false) + defer pipe.Close() + + root, err := os.OpenRoot(filepath.Dir(name)) + if err != nil { + t.Fatal(err) + } + defer root.Close() + + f, err := root.OpenFile(filepath.Base(name), os.O_TRUNC|os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + t.Fatal(err) + } + f.Close() +} diff --git a/src/os/signal/signal_plan9_test.go b/src/os/signal/signal_plan9_test.go index 8357199aa4a..1d76cfcaaa6 100644 --- a/src/os/signal/signal_plan9_test.go +++ b/src/os/signal/signal_plan9_test.go @@ -5,7 +5,7 @@ package signal import ( - "internal/itoa" + "internal/strconv" "os" "runtime" "syscall" @@ -157,7 +157,7 @@ func TestStop(t *testing.T) { } func postNote(pid int, note string) error { - f, err := os.OpenFile("/proc/"+itoa.Itoa(pid)+"/note", os.O_WRONLY, 0) + f, err := os.OpenFile("/proc/"+strconv.Itoa(pid)+"/note", os.O_WRONLY, 0) if err != nil { return err } diff --git a/src/os/signal/signal_test.go b/src/os/signal/signal_test.go index 0aa0439b90c..8d3f230178e 100644 --- a/src/os/signal/signal_test.go +++ b/src/os/signal/signal_test.go @@ -347,7 +347,6 @@ func TestStop(t *testing.T) { } for _, sig := range sigs { - sig := sig t.Run(fmt.Sprint(sig), func(t *testing.T) { // When calling Notify with a specific signal, // independent signals should not interfere with each other, @@ -441,7 +440,6 @@ func TestNohup(t *testing.T) { subTimeout -= subTimeout / 10 // Leave 10% headroom for propagating output. } for i := 1; i <= 2; i++ { - i := i t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { t.Parallel() @@ -484,7 +482,6 @@ func TestNohup(t *testing.T) { subTimeout -= subTimeout / 10 // Leave 10% headroom for propagating output. 
} for i := 1; i <= 2; i++ { - i := i t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { t.Parallel() @@ -743,7 +740,6 @@ func TestNotifyContextNotifications(t *testing.T) { {"multiple", 10}, } for _, tc := range testCases { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() diff --git a/src/os/tempfile.go b/src/os/tempfile.go index 428dc965b7a..085423805fa 100644 --- a/src/os/tempfile.go +++ b/src/os/tempfile.go @@ -7,7 +7,7 @@ package os import ( "errors" "internal/bytealg" - "internal/itoa" + "internal/strconv" _ "unsafe" // for go:linkname ) @@ -20,7 +20,7 @@ import ( func runtime_rand() uint64 func nextRandom() string { - return itoa.Uitoa(uint(uint32(runtime_rand()))) + return strconv.FormatUint(uint64(uint32(runtime_rand())), 10) } // CreateTemp creates a new temporary file in the directory dir, diff --git a/src/path/filepath/path.go b/src/path/filepath/path.go index 5ffd9f0b6c3..ecf201ac000 100644 --- a/src/path/filepath/path.go +++ b/src/path/filepath/path.go @@ -173,19 +173,20 @@ func unixAbs(path string) (string, error) { return Join(wd, path), nil } -// Rel returns a relative path that is lexically equivalent to targpath when -// joined to basepath with an intervening separator. That is, -// [Join](basepath, Rel(basepath, targpath)) is equivalent to targpath itself. -// On success, the returned path will always be relative to basepath, -// even if basepath and targpath share no elements. -// An error is returned if targpath can't be made relative to basepath or if -// knowing the current working directory would be necessary to compute it. -// Rel calls [Clean] on the result. -func Rel(basepath, targpath string) (string, error) { - baseVol := VolumeName(basepath) - targVol := VolumeName(targpath) - base := Clean(basepath) - targ := Clean(targpath) +// Rel returns a relative path that is lexically equivalent to targPath when +// joined to basePath with an intervening separator. That is, +// [Join](basePath, Rel(basePath, targPath)) is equivalent to targPath itself. +// +// The returned path will always be relative to basePath, even if basePath and +// targPath share no elements. Rel calls [Clean] on the result. +// +// An error is returned if targPath can't be made relative to basePath +// or if knowing the current working directory would be necessary to compute it. +func Rel(basePath, targPath string) (string, error) { + baseVol := VolumeName(basePath) + targVol := VolumeName(targPath) + base := Clean(basePath) + targ := Clean(targPath) if sameWord(targ, base) { return ".", nil } @@ -194,7 +195,7 @@ func Rel(basepath, targpath string) (string, error) { if base == "." { base = "" } else if base == "" && filepathlite.VolumeNameLen(baseVol) > 2 /* isUNC */ { - // Treat any targetpath matching `\\host\share` basepath as absolute path. + // Treat any targetpath matching `\\host\share` basePath as absolute path. base = string(Separator) } @@ -202,7 +203,7 @@ func Rel(basepath, targpath string) (string, error) { baseSlashed := len(base) > 0 && base[0] == Separator targSlashed := len(targ) > 0 && targ[0] == Separator if baseSlashed != targSlashed || !sameWord(baseVol, targVol) { - return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath) + return "", errors.New("Rel: can't make " + targPath + " relative to " + basePath) } // Position base[b0:bi] and targ[t0:ti] at the first differing elements. bl := len(base) @@ -228,7 +229,7 @@ func Rel(basepath, targpath string) (string, error) { t0 = ti } if base[b0:bi] == ".." 
{ - return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath) + return "", errors.New("Rel: can't make " + targPath + " relative to " + basePath) } if b0 != bl { // Base elements left. Must go up before going down. @@ -248,7 +249,7 @@ func Rel(basepath, targpath string) (string, error) { buf[n] = Separator copy(buf[n+1:], targ[t0:]) } - return string(buf), nil + return Clean(string(buf)), nil } return targ[t0:], nil } diff --git a/src/path/filepath/path_test.go b/src/path/filepath/path_test.go index 7ea02a7c282..ad99f70287f 100644 --- a/src/path/filepath/path_test.go +++ b/src/path/filepath/path_test.go @@ -936,7 +936,6 @@ func TestWalkSymlinkRoot(t *testing.T) { buggyGOOS: []string{"darwin", "ios"}, // https://go.dev/issue/59586 }, } { - tt := tt t.Run(tt.desc, func(t *testing.T) { var walked []string err := filepath.Walk(tt.root, func(path string, info fs.FileInfo, err error) error { @@ -1506,6 +1505,7 @@ var reltests = []RelTests{ {"/../../a/b", "/../../a/b/c/d", "c/d"}, {".", "a/b", "a/b"}, {".", "..", ".."}, + {"", "../../.", "../.."}, // can't do purely lexically {"..", ".", "err"}, diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go index 2a8c5206624..8509f00a5ee 100644 --- a/src/reflect/all_test.go +++ b/src/reflect/all_test.go @@ -6198,19 +6198,6 @@ func TestChanOfDir(t *testing.T) { } func TestChanOfGC(t *testing.T) { - done := make(chan bool, 1) - go func() { - select { - case <-done: - case <-time.After(5 * time.Second): - panic("deadlock in TestChanOfGC") - } - }() - - defer func() { - done <- true - }() - type T *uintptr tt := TypeOf(T(nil)) ct := ChanOf(BothDir, tt) @@ -7528,7 +7515,6 @@ func TestTypeStrings(t *testing.T) { func TestOffsetLock(t *testing.T) { var wg sync.WaitGroup for i := 0; i < 4; i++ { - i := i wg.Add(1) go func() { for j := 0; j < 50; j++ { @@ -8110,11 +8096,11 @@ func TestValue_Len(t *testing.T) { func TestValue_Comparable(t *testing.T) { var a int var s []int - var i interface{} = a - var iNil interface{} - var iSlice interface{} = s - var iArrayFalse interface{} = [2]interface{}{1, map[int]int{}} - var iArrayTrue interface{} = [2]interface{}{1, struct{ I interface{} }{1}} + var i any = a + var iNil any + var iSlice any = s + var iArrayFalse any = [2]any{1, map[int]int{}} + var iArrayTrue any = [2]any{1, struct{ I any }{1}} var testcases = []struct { value Value comparable bool @@ -8251,22 +8237,22 @@ func TestValue_Comparable(t *testing.T) { false, }, { - ValueOf([2]struct{ I interface{} }{{1}, {1}}), + ValueOf([2]struct{ I any }{{1}, {1}}), true, false, }, { - ValueOf([2]struct{ I interface{} }{{[]int{}}, {1}}), + ValueOf([2]struct{ I any }{{[]int{}}, {1}}), false, false, }, { - ValueOf([2]interface{}{1, struct{ I int }{1}}), + ValueOf([2]any{1, struct{ I int }{1}}), true, false, }, { - ValueOf([2]interface{}{[1]interface{}{map[int]int{}}, struct{ I int }{1}}), + ValueOf([2]any{[1]any{map[int]int{}}, struct{ I int }{1}}), false, false, }, @@ -8300,10 +8286,10 @@ type ValueEqualTest struct { vDeref, uDeref bool } -var equalI interface{} = 1 -var equalSlice interface{} = []int{1} -var nilInterface interface{} -var mapInterface interface{} = map[int]int{} +var equalI any = 1 +var equalSlice any = []int{1} +var nilInterface any +var mapInterface any = map[int]int{} var valueEqualTests = []ValueEqualTest{ { @@ -8482,8 +8468,8 @@ func TestValue_EqualNonComparable(t *testing.T) { // Value of array is non-comparable because of non-comparable elements. 
ValueOf([0]map[int]int{}), ValueOf([0]func(){}), - ValueOf(([1]struct{ I interface{} }{{[]int{}}})), - ValueOf(([1]interface{}{[1]interface{}{map[int]int{}}})), + ValueOf(([1]struct{ I any }{{[]int{}}})), + ValueOf(([1]any{[1]any{map[int]int{}}})), } for _, value := range values { // Panic when reflect.Value.Equal using two valid non-comparable values. @@ -8548,7 +8534,6 @@ func TestClear(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() if !tc.testFunc(tc.value) { @@ -8582,7 +8567,6 @@ func TestValuePointerAndUnsafePointer(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { if got := tc.val.Pointer(); got != uintptr(tc.wantUnsafePointer) { t.Errorf("unexpected uintptr result, got %#x, want %#x", got, uintptr(tc.wantUnsafePointer)) diff --git a/src/reflect/type.go b/src/reflect/type.go index fc6edb1e106..9b8726824e6 100644 --- a/src/reflect/type.go +++ b/src/reflect/type.go @@ -314,7 +314,10 @@ const Ptr = Pointer // uncommonType is present only for defined types or types with methods // (if T is a defined type, the uncommonTypes for T and *T have methods). -// Using a pointer to this struct reduces the overall size required +// When present, the uncommonType struct immediately follows the +// abi.Type struct in memory. +// The abi.TFlagUncommon indicates the presence of uncommonType. +// Using an optional struct reduces the overall size required // to describe a non-defined type with no methods. type uncommonType = abi.UncommonType diff --git a/src/reflect/type_test.go b/src/reflect/type_test.go index fc76a4fb985..00344c62317 100644 --- a/src/reflect/type_test.go +++ b/src/reflect/type_test.go @@ -12,7 +12,7 @@ import ( func TestTypeFor(t *testing.T) { type ( mystring string - myiface interface{} + myiface any ) testcases := []struct { diff --git a/src/reflect/value.go b/src/reflect/value.go index c0ac45de77b..b5d5aa8bf2e 100644 --- a/src/reflect/value.go +++ b/src/reflect/value.go @@ -8,7 +8,7 @@ import ( "errors" "internal/abi" "internal/goarch" - "internal/itoa" + "internal/strconv" "internal/unsafeheader" "math" "runtime" @@ -3573,7 +3573,7 @@ func cvtStringRunes(v Value, t Type) Value { func cvtSliceArrayPtr(v Value, t Type) Value { n := t.Elem().Len() if n > v.Len() { - panic("reflect: cannot convert slice with length " + itoa.Itoa(v.Len()) + " to pointer to array with length " + itoa.Itoa(n)) + panic("reflect: cannot convert slice with length " + strconv.Itoa(v.Len()) + " to pointer to array with length " + strconv.Itoa(n)) } h := (*unsafeheader.Slice)(v.ptr) return Value{t.common(), h.Data, v.flag&^(flagIndir|flagAddr|flagKindMask) | flag(Pointer)} @@ -3583,7 +3583,7 @@ func cvtSliceArrayPtr(v Value, t Type) Value { func cvtSliceArray(v Value, t Type) Value { n := t.Len() if n > v.Len() { - panic("reflect: cannot convert slice with length " + itoa.Itoa(v.Len()) + " to array with length " + itoa.Itoa(n)) + panic("reflect: cannot convert slice with length " + strconv.Itoa(v.Len()) + " to array with length " + strconv.Itoa(n)) } h := (*unsafeheader.Slice)(v.ptr) typ := t.common() diff --git a/src/reflect/visiblefields_test.go b/src/reflect/visiblefields_test.go index 66d545dd1f7..eca0925f966 100644 --- a/src/reflect/visiblefields_test.go +++ b/src/reflect/visiblefields_test.go @@ -292,7 +292,6 @@ type Rec2 struct { func TestFields(t *testing.T) { for _, test := range fieldsTests { - test := test t.Run(test.testName, func(t *testing.T) { typ := TypeOf(test.val) fields := VisibleFields(typ) 
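The test above exercises reflect.VisibleFields, which flattens embedded structs into a single list of visible fields. A minimal sketch of the public API:

```
package main

import (
	"fmt"
	"reflect"
)

type Inner struct{ A int }

type Outer struct {
	Inner
	B string
}

func main() {
	for _, f := range reflect.VisibleFields(reflect.TypeOf(Outer{})) {
		fmt.Println(f.Name, f.Index)
	}
	// Prints Inner [0], A [0 0], B [1]: promoted fields appear
	// alongside the embedded field itself.
}
```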
diff --git a/src/runtime/HACKING.md b/src/runtime/HACKING.md
index 141fae9914a..89727ee80f1 100644
--- a/src/runtime/HACKING.md
+++ b/src/runtime/HACKING.md
@@ -266,6 +266,153 @@ If memory is already in a type-safe state and is simply being set to the zero
 value, this must be done using regular writes, `typedmemclr`, or
 `memclrHasPointers`. This performs write barriers.
 
+Linkname conventions
+====================
+
+```
+//go:linkname localname [importpath.name]
+```
+
+`//go:linkname` specifies the symbol name (`importpath.name`) used to
+reference a local identifier (`localname`). The target symbol name is an
+arbitrary ELF/Mach-O/etc. symbol name, but by convention we typically use a
+package-prefixed symbol name to keep things organized.
+
+`//go:linkname` is very flexible, so as a convention to simplify things, we
+define three standard forms of `//go:linkname` directives.
+
+When possible, always prefer to use the linkname "handshake" described below.
+
+"Push linkname"
+---------------
+
+A "push" linkname gives a local _definition_ a final symbol name in a different
+package. This effectively "pushes" the symbol to the other package.
+
+```
+//go:linkname foo otherpkg.foo
+func foo() {
+	// impl
+}
+```
+
+The other package needs a _declaration_ to use the symbol from Go, or it can
+directly reference the symbol in assembly. Typically this is an "export
+linkname" declaration (below).
+
+"Pull linkname"
+---------------
+
+A "pull" linkname gives references to a local _declaration_ a final symbol name
+in a different package. This effectively "pulls" the symbol from the other
+package.
+
+```
+//go:linkname foo otherpkg.foo
+func foo()
+```
+
+The other package simply needs to define the symbol, but typically this is an
+"export linkname" definition (below).
+
+"Export linkname"
+-----------------
+
+The second argument to `//go:linkname` is the target symbol name. If it is
+omitted, the toolchain uses the default symbol name. In other words, this is a
+linkname to itself. This seems to be a no-op, but it is used to mean that this
+symbol is "exported" for use with another linkname.
+
+```
+//go:linkname foo
+func foo() {
+	// impl
+}
+```
+
+When applied to a definition, an export linkname indicates that another package
+has a pull linkname targeting this symbol. This has a few effects:
+
+- The compiler avoids generating ABI wrappers for ABI0 and/or ABIInternal, so a
+  symbol defined in Go can be referenced from assembly in another package, or
+  vice versa.
+- The linker will allow pull linknames to this symbol even with
+  `-checklinkname=true` (see "Handshake" section below).
+
+```
+//go:linkname foo
+func foo()
+```
+
+When applied to a declaration, an export linkname indicates that another package
+has a push linkname targeting this symbol. Other than documentation, the only
+effect this has on the toolchain is that the compiler will not require a `.s`
+file in the package (normally the compiler requires a `.s` file when there are
+function declarations without a body).
+
+Handshake
+---------
+
+We always prefer to use push linknames rather than pull linknames. With a push
+linkname, the package with the definition is aware it is publishing an API to
+another package. On the other hand, with a pull linkname, the definition
+package may be completely unaware of the dependency and may unintentionally
+break users.
+
+The preferred form for a linkname is to use a push linkname in the defining
+package, and an export linkname in the receiving package.
The latter is not
+strictly required, but serves as documentation. By convention, the receiving
+package prefixes the symbol name with the name of the source package, to
+further aid documentation.
+
+```
+package runtime
+
+//go:linkname foo otherpkg.runtime_foo
+func foo() {
+	// impl
+}
+```
+
+```
+package otherpkg
+
+//go:linkname runtime_foo
+func runtime_foo()
+```
+
+As of Go 1.23, the linker forbids pull linknames of symbols in the standard
+library unless they participate in a handshake. Since many third-party packages
+already have pull linknames to standard library functions, for backwards
+compatibility, standard library symbols that are the target of external pull
+linknames must use an export linkname to signal to the linker that pull
+linknames are acceptable.
+
+```
+package runtime
+
+//go:linkname fastrand
+func fastrand() {
+	// impl
+}
+```
+
+Note that linker enforcement can be disabled with the `-checklinkname=false`
+flag.
+
+Variables
+---------
+
+All of the examples above use `//go:linkname` on functions. It is also possible
+to use it on global variables, though this is much less common.
+
+Variables don't have a clear distinction between definition and declaration. As
+a rule, only one side should have a non-zero initial value. That side is the
+"definition" and the other is the "declaration".
+
+Both sides should have the same type, including size. If one side is larger
+than the other, the linker allocates space for the larger size.
+
 Runtime-only compiler directives
 ================================
 
@@ -371,9 +518,8 @@ The parser for the execution trace format lives in the `internal/trace`
 package. If you plan on adding new trace events, consider starting with a
 [trace experiment](../internal/trace/tracev2/EXPERIMENTS.md).
 
-If you plan to add new trace instrumentation to the runtime, wrap whatever operation
-you're tracing in `traceAcquire` and `traceRelease` fully. These functions mark a
-critical section that appears atomic to the execution tracer (but nothing else).
+If you plan to add new trace instrumentation to the runtime, read the comment
+at the top of [trace.go](./trace.go), especially the invariants.
 
 debuglog
 ========
diff --git a/src/runtime/alg.go b/src/runtime/alg.go
index b956f9d05af..c5951dc20b8 100644
--- a/src/runtime/alg.go
+++ b/src/runtime/alg.go
@@ -14,10 +14,23 @@ import (
 )
 
 const (
-	c0 = uintptr((8-goarch.PtrSize)/4*2860486313 + (goarch.PtrSize-4)/4*33054211828000289)
-	c1 = uintptr((8-goarch.PtrSize)/4*3267000013 + (goarch.PtrSize-4)/4*23344194077549503)
+	// We use a 32-bit hash on Wasm, see hash32.go.
+	hashSize = (1-goarch.IsWasm)*goarch.PtrSize + goarch.IsWasm*4
+	c0 = uintptr((8-hashSize)/4*2860486313 + (hashSize-4)/4*33054211828000289)
+	c1 = uintptr((8-hashSize)/4*3267000013 + (hashSize-4)/4*23344194077549503)
 )
 
+func trimHash(h uintptr) uintptr {
+	if goarch.IsWasm != 0 {
+		// On Wasm, we use a 32-bit hash, even though uintptr is 64-bit.
+		// memhash* always returns a uintptr with the high 32 bits zero
+		// (see hash32.go). We trim the hash in other places where we
+		// compute the hash manually, e.g. in interhash.
+ return uintptr(uint32(h)) + } + return h +} + func memhash0(p unsafe.Pointer, h uintptr) uintptr { return h } @@ -100,9 +113,9 @@ func f32hash(p unsafe.Pointer, h uintptr) uintptr { f := *(*float32)(p) switch { case f == 0: - return c1 * (c0 ^ h) // +0, -0 + return trimHash(c1 * (c0 ^ h)) // +0, -0 case f != f: - return c1 * (c0 ^ h ^ uintptr(rand())) // any kind of NaN + return trimHash(c1 * (c0 ^ h ^ uintptr(rand()))) // any kind of NaN default: return memhash(p, h, 4) } @@ -112,9 +125,9 @@ func f64hash(p unsafe.Pointer, h uintptr) uintptr { f := *(*float64)(p) switch { case f == 0: - return c1 * (c0 ^ h) // +0, -0 + return trimHash(c1 * (c0 ^ h)) // +0, -0 case f != f: - return c1 * (c0 ^ h ^ uintptr(rand())) // any kind of NaN + return trimHash(c1 * (c0 ^ h ^ uintptr(rand()))) // any kind of NaN default: return memhash(p, h, 8) } @@ -145,9 +158,9 @@ func interhash(p unsafe.Pointer, h uintptr) uintptr { panic(errorString("hash of unhashable type " + toRType(t).string())) } if t.IsDirectIface() { - return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0) + return trimHash(c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)) } else { - return c1 * typehash(t, a.data, h^c0) + return trimHash(c1 * typehash(t, a.data, h^c0)) } } @@ -172,9 +185,9 @@ func nilinterhash(p unsafe.Pointer, h uintptr) uintptr { panic(errorString("hash of unhashable type " + toRType(t).string())) } if t.IsDirectIface() { - return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0) + return trimHash(c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)) } else { - return c1 * typehash(t, a.data, h^c0) + return trimHash(c1 * typehash(t, a.data, h^c0)) } } diff --git a/src/runtime/align_runtime_test.go b/src/runtime/align_runtime_test.go index 4bcb49db2f5..8b21934a75f 100644 --- a/src/runtime/align_runtime_test.go +++ b/src/runtime/align_runtime_test.go @@ -14,7 +14,6 @@ import "unsafe" // operations (all the *64 operations in internal/runtime/atomic). var AtomicFields = []uintptr{ unsafe.Offsetof(m{}.procid), - unsafe.Offsetof(p{}.gcFractionalMarkTime), unsafe.Offsetof(profBuf{}.overflow), unsafe.Offsetof(profBuf{}.overflowTime), unsafe.Offsetof(heapStatsDelta{}.tinyAllocCount), diff --git a/src/runtime/arena.go b/src/runtime/arena.go index 52a2a99d6ca..2095bfa8e02 100644 --- a/src/runtime/arena.go +++ b/src/runtime/arena.go @@ -1051,7 +1051,11 @@ func (h *mheap) allocUserArenaChunk() *mspan { // Model the user arena as a heap span for a large object. spc := makeSpanClass(0, false) - h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages) + // A user arena chunk is always fresh from the OS. It's either newly allocated + // via sysAlloc() or reused from the readyList after a sysFault(). The memory is + // then re-mapped via sysMap(), so we can safely treat it as scavenged; the + // kernel guarantees it will be zero-filled on its next use. 
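Back to the hashSize constant introduced in alg.go above: Go constant expressions cannot use if, so the code selects between goarch.PtrSize and 4 arithmetically, with goarch.IsWasm acting as a 0/1 selector. A standalone sketch of the same trick, with stand-in constants in place of the goarch values:

```
package main

import "fmt"

const (
	isWasm  = 0 // stand-in for goarch.IsWasm on a non-Wasm target
	ptrSize = 8 // stand-in for goarch.PtrSize

	// Exactly one term survives: ptrSize when isWasm is 0, 4 when it is 1.
	hashSize = (1-isWasm)*ptrSize + isWasm*4
)

func main() {
	fmt.Println(hashSize) // 8 here; would be 4 if isWasm were 1
}
```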
+ h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages, userArenaChunkBytes) s.isUserArenaChunk = true s.elemsize -= userArenaChunkReserveBytes() s.freeindex = 1 diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s index df32e90fda8..03f1a46b559 100644 --- a/src/runtime/asm_386.s +++ b/src/runtime/asm_386.s @@ -597,12 +597,15 @@ CALLFN(·call268435456, 268435456) CALLFN(·call536870912, 536870912) CALLFN(·call1073741824, 1073741824) -TEXT runtime·procyield(SB),NOSPLIT,$0-0 +TEXT runtime·procyieldAsm(SB),NOSPLIT,$0-0 MOVL cycles+0(FP), AX + TESTL AX, AX + JZ done again: PAUSE SUBL $1, AX JNZ again +done: RET TEXT ·publicationBarrier(SB),NOSPLIT,$0-0 diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s index 1cd1b0afc56..ea851469368 100644 --- a/src/runtime/asm_amd64.s +++ b/src/runtime/asm_amd64.s @@ -815,12 +815,15 @@ CALLFN(·call268435456, 268435456) CALLFN(·call536870912, 536870912) CALLFN(·call1073741824, 1073741824) -TEXT runtime·procyield(SB),NOSPLIT,$0-0 +TEXT runtime·procyieldAsm(SB),NOSPLIT,$0-0 MOVL cycles+0(FP), AX + TESTL AX, AX + JZ done again: PAUSE SUBL $1, AX JNZ again +done: RET diff --git a/src/runtime/asm_arm.s b/src/runtime/asm_arm.s index d371e80d848..9373846c74a 100644 --- a/src/runtime/asm_arm.s +++ b/src/runtime/asm_arm.s @@ -839,7 +839,7 @@ TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-12 TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-12 JMP runtime·memhash64Fallback(SB) -TEXT runtime·procyield(SB),NOSPLIT|NOFRAME,$0 +TEXT runtime·procyieldAsm(SB),NOSPLIT|NOFRAME,$0 MOVW cycles+0(FP), R1 MOVW $0, R0 yieldloop: diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s index a0e82ec830f..902a7066aaa 100644 --- a/src/runtime/asm_arm64.s +++ b/src/runtime/asm_arm64.s @@ -1036,12 +1036,61 @@ aesloop: VMOV V0.D[0], R0 RET -TEXT runtime·procyield(SB),NOSPLIT,$0-0 +// The Arm architecture provides a user space accessible counter-timer which +// is incremented at a fixed but machine-specific rate. Software can (spin) +// wait until the counter-timer reaches some desired value. +// +// Armv8.7-A introduced the WFET (FEAT_WFxT) instruction, which allows the +// processor to enter a low power state for a set time, or until an event is +// received. +// +// However, WFET is not used here because it is only available on newer hardware, +// and we aim to maintain compatibility with older Armv8-A platforms that do not +// support this feature. +// +// As a fallback, we can instead use the ISB instruction to decrease processor +// activity and thus power consumption between checks of the counter-timer. +// Note that we do not depend on the latency of the ISB instruction which is +// implementation specific. Actual delay comes from comparing against a fresh +// read of the counter-timer value. +// +// Read more in this Arm blog post: +// https://community.arm.com/arm-community-blogs/b/architectures-and-processors-blog/posts/multi-threaded-applications-arm + +TEXT runtime·procyieldAsm(SB),NOSPLIT,$0-0 MOVWU cycles+0(FP), R0 -again: - YIELD - SUBW $1, R0 - CBNZ R0, again + CBZ R0, done + //Prevent speculation of subsequent counter/timer reads and memory accesses. + ISB $15 + // If the delay is very short, just return. + // Hardcode 18ns as the first ISB delay. + CMP $18, R0 + BLS done + // Adjust for overhead of initial ISB. + SUB $18, R0, R0 + // Convert the delay from nanoseconds to counter/timer ticks. + // Read the counter/timer frequency. 
+	// delay_ticks = (delay * CNTFRQ_EL0) / 1e9
+	// With the below simplifications and adjustments,
+	// we are usually within 2% of the correct value:
+	// delay_ticks = (delay + delay / 16) * CNTFRQ_EL0 >> 30
+	MRS	CNTFRQ_EL0, R1
+	ADD	R0>>4, R0, R0
+	MUL	R1, R0, R0
+	LSR	$30, R0, R0
+	CBZ	R0, done
+	// start = current counter/timer value
+	MRS	CNTVCT_EL0, R2
+delay:
+	// Delay using ISB for all ticks.
+	ISB	$15
+	// Subtract and compare to handle counter roll-over.
+	// counter_read() - start < delay_ticks
+	MRS	CNTVCT_EL0, R1
+	SUB	R2, R1, R1
+	CMP	R0, R1
+	BCC	delay
+done:
 	RET
 
 // Save state of caller into g->sched,
diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s
index 586bf89a5d3..a6a5519afb6 100644
--- a/src/runtime/asm_loong64.s
+++ b/src/runtime/asm_loong64.s
@@ -505,7 +505,7 @@ CALLFN(·call268435456, 268435456)
 CALLFN(·call536870912, 536870912)
 CALLFN(·call1073741824, 1073741824)
 
-TEXT runtime·procyield(SB),NOSPLIT,$0-0
+TEXT runtime·procyieldAsm(SB),NOSPLIT,$0-0
 	RET
 
 // Save state of caller into g->sched.
diff --git a/src/runtime/asm_mips64x.s b/src/runtime/asm_mips64x.s
index d4523b4a74c..532eca752ff 100644
--- a/src/runtime/asm_mips64x.s
+++ b/src/runtime/asm_mips64x.s
@@ -408,7 +408,7 @@ CALLFN(·call268435456, 268435456)
 CALLFN(·call536870912, 536870912)
 CALLFN(·call1073741824, 1073741824)
 
-TEXT runtime·procyield(SB),NOSPLIT,$0-0
+TEXT runtime·procyieldAsm(SB),NOSPLIT,$0-0
 	RET
 
 // Save state of caller into g->sched,
diff --git a/src/runtime/asm_mipsx.s b/src/runtime/asm_mipsx.s
index ec352f58289..2dc3f1c3ad7 100644
--- a/src/runtime/asm_mipsx.s
+++ b/src/runtime/asm_mipsx.s
@@ -406,7 +406,7 @@ CALLFN(·call268435456, 268435456)
 CALLFN(·call536870912, 536870912)
 CALLFN(·call1073741824, 1073741824)
 
-TEXT runtime·procyield(SB),NOSPLIT,$0-4
+TEXT runtime·procyieldAsm(SB),NOSPLIT,$0-4
 	RET
 
 // Save state of caller into g->sched,
diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s
index b42e0b62f85..aaa2e4346c7 100644
--- a/src/runtime/asm_ppc64x.s
+++ b/src/runtime/asm_ppc64x.s
@@ -11,11 +11,17 @@
 #include "asm_ppc64x.h"
 #include "cgo/abi_ppc64x.h"
 
-
+// This is called using the host ABI. argc and argv arguments
+// should be in R3 and R4 respectively.
 TEXT _rt0_ppc64x_lib(SB),NOSPLIT|NOFRAME,$0
-	// This is called with ELFv2 calling conventions. Convert to Go.
-	// Allocate argument storage for call to newosproc0.
-	STACK_AND_SAVE_HOST_TO_GO_ABI(16)
+	// Start with standard C stack frame layout and linkage, allocate
+	// 16 bytes of argument space, save callee-save regs, and set R0 to $0.
+	// Allocate an extra 16 bytes to account for the larger fixed frame size
+	// of aix/elfv1 (48 vs 32) to ensure 16 bytes of parameter save space.
+	STACK_AND_SAVE_HOST_TO_GO_ABI(32)
+	// The above will not preserve R2 (TOC). Save it in case Go is
+	// compiled without a TOC pointer (e.g. -buildmode=default).
+	MOVD	R2, 24(R1)
 
 	MOVD	R3, _rt0_ppc64x_lib_argc<>(SB)
 	MOVD	R4, _rt0_ppc64x_lib_argv<>(SB)
@@ -28,14 +34,28 @@ TEXT _rt0_ppc64x_lib(SB),NOSPLIT|NOFRAME,$0
 	MOVD	R12, CTR
 	BL	(CTR)
 
+#ifdef GOOS_aix
+	// See runtime/cgo/gcc_aix_ppc64.c
+	MOVBZ	runtime·isarchive(SB), R3	// Check buildmode = c-archive
+	CMP	$0, R3
+	BEQ	done
+#endif
+
+	// Create a new thread to do the runtime initialization and return.
+	// _cgo_sys_thread_create is a C function.
 	MOVD	_cgo_sys_thread_create(SB), R12
 	CMP	$0, R12
 	BEQ	nocgo
 	MOVD	$_rt0_ppc64x_lib_go(SB), R3
 	MOVD	$0, R4
+#ifdef GO_PPC64X_HAS_FUNCDESC
+	// Load the real entry address from the first slot of the function descriptor.
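Returning briefly to the arm64 procyield conversion above: (delay + delay/16) * CNTFRQ_EL0 >> 30 scales by (17/16)/2^30, roughly 1/1.01e9, so it lands about 1% under the exact delay * freq / 1e9. A quick numeric check, with an assumed counter frequency of 24 MHz:

```
package main

import "fmt"

func main() {
	const freq = 24_000_000 // assumed CNTFRQ_EL0 value of 24 MHz
	for _, delay := range []uint64{100, 1000, 10000} { // nanoseconds
		exact := delay * freq / 1_000_000_000
		approx := (delay + delay/16) * freq >> 30
		fmt.Println(delay, exact, approx) // e.g. 1000ns: 24 vs 23 ticks
	}
}
```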
+ MOVD 8(R12), R2 + MOVD (R12), R12 +#endif MOVD R12, CTR BL (CTR) + MOVD 24(R1), R2 // Restore the old frame, and R2. BR done nocgo: @@ -48,8 +68,7 @@ nocgo: BL (CTR) done: - // Restore and return to ELFv2 caller. - UNSTACK_AND_RESTORE_GO_TO_HOST_ABI(16) + UNSTACK_AND_RESTORE_GO_TO_HOST_ABI(32) RET #ifdef GO_PPC64X_HAS_FUNCDESC @@ -593,8 +612,10 @@ CALLFN(·call268435456, 268435456) CALLFN(·call536870912, 536870912) CALLFN(·call1073741824, 1073741824) -TEXT runtime·procyield(SB),NOSPLIT|NOFRAME,$0-4 +TEXT runtime·procyieldAsm(SB),NOSPLIT|NOFRAME,$0-4 MOVW cycles+0(FP), R7 + CMP $0, R7 + BEQ done // POWER does not have a pause/yield instruction equivalent. // Instead, we can lower the program priority by setting the // Program Priority Register prior to the wait loop and set it @@ -606,6 +627,7 @@ again: CMP $0, R7 BNE again OR R6, R6, R6 // Set PPR priority back to medium-low +done: RET // Save state of caller into g->sched, diff --git a/src/runtime/asm_riscv64.s b/src/runtime/asm_riscv64.s index 6b16d03c9a8..5bd16181ee2 100644 --- a/src/runtime/asm_riscv64.s +++ b/src/runtime/asm_riscv64.s @@ -6,6 +6,104 @@ #include "funcdata.h" #include "textflag.h" + +// When building with -buildmode=c-shared, this symbol is called when the shared +// library is loaded. +TEXT _rt0_riscv64_lib(SB),NOSPLIT,$224 + // Preserve callee-save registers, along with X1 (LR). + MOV X1, (8*3)(X2) + MOV X8, (8*4)(X2) + MOV X9, (8*5)(X2) + MOV X18, (8*6)(X2) + MOV X19, (8*7)(X2) + MOV X20, (8*8)(X2) + MOV X21, (8*9)(X2) + MOV X22, (8*10)(X2) + MOV X23, (8*11)(X2) + MOV X24, (8*12)(X2) + MOV X25, (8*13)(X2) + MOV X26, (8*14)(X2) + MOV g, (8*15)(X2) + MOVD F8, (8*16)(X2) + MOVD F9, (8*17)(X2) + MOVD F18, (8*18)(X2) + MOVD F19, (8*19)(X2) + MOVD F20, (8*20)(X2) + MOVD F21, (8*21)(X2) + MOVD F22, (8*22)(X2) + MOVD F23, (8*23)(X2) + MOVD F24, (8*24)(X2) + MOVD F25, (8*25)(X2) + MOVD F26, (8*26)(X2) + MOVD F27, (8*27)(X2) + + // Initialize g as nil in case of using g later e.g. sigaction in cgo_sigaction.go + MOV X0, g + + MOV A0, _rt0_riscv64_lib_argc<>(SB) + MOV A1, _rt0_riscv64_lib_argv<>(SB) + + // Synchronous initialization. + MOV $runtime·libpreinit(SB), T0 + JALR RA, T0 + + // Create a new thread to do the runtime initialization and return. + MOV _cgo_sys_thread_create(SB), T0 + BEQZ T0, nocgo + MOV $_rt0_riscv64_lib_go(SB), A0 + MOV $0, A1 + JALR RA, T0 + JMP restore + +nocgo: + MOV $0x800000, A0 // stacksize = 8192KB + MOV $_rt0_riscv64_lib_go(SB), A1 + MOV A0, 8(X2) + MOV A1, 16(X2) + MOV $runtime·newosproc0(SB), T0 + JALR RA, T0 + +restore: + // Restore callee-save registers, along with X1 (LR). 
+ MOV (8*3)(X2), X1 + MOV (8*4)(X2), X8 + MOV (8*5)(X2), X9 + MOV (8*6)(X2), X18 + MOV (8*7)(X2), X19 + MOV (8*8)(X2), X20 + MOV (8*9)(X2), X21 + MOV (8*10)(X2), X22 + MOV (8*11)(X2), X23 + MOV (8*12)(X2), X24 + MOV (8*13)(X2), X25 + MOV (8*14)(X2), X26 + MOV (8*15)(X2), g + MOVD (8*16)(X2), F8 + MOVD (8*17)(X2), F9 + MOVD (8*18)(X2), F18 + MOVD (8*19)(X2), F19 + MOVD (8*20)(X2), F20 + MOVD (8*21)(X2), F21 + MOVD (8*22)(X2), F22 + MOVD (8*23)(X2), F23 + MOVD (8*24)(X2), F24 + MOVD (8*25)(X2), F25 + MOVD (8*26)(X2), F26 + MOVD (8*27)(X2), F27 + + RET + +TEXT _rt0_riscv64_lib_go(SB),NOSPLIT,$0 + MOV _rt0_riscv64_lib_argc<>(SB), A0 + MOV _rt0_riscv64_lib_argv<>(SB), A1 + MOV $runtime·rt0_go(SB), T0 + JALR ZERO, T0 + +DATA _rt0_riscv64_lib_argc<>(SB)/8, $0 +GLOBL _rt0_riscv64_lib_argc<>(SB),NOPTR, $8 +DATA _rt0_riscv64_lib_argv<>(SB)/8, $0 +GLOBL _rt0_riscv64_lib_argv<>(SB),NOPTR, $8 + // func rt0_go() TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0 // X2 = stack; A0 = argc; A1 = argv @@ -269,8 +367,8 @@ TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0 MOV gobuf_pc(T0), T0 JALR ZERO, T0 -// func procyield(cycles uint32) -TEXT runtime·procyield(SB),NOSPLIT,$0-0 +// func procyieldAsm(cycles uint32) +TEXT runtime·procyieldAsm(SB),NOSPLIT,$0-0 RET // Switch to m->g0's stack, call fn(g). diff --git a/src/runtime/asm_s390x.s b/src/runtime/asm_s390x.s index 4cc1c0eb104..bb29845f583 100644 --- a/src/runtime/asm_s390x.s +++ b/src/runtime/asm_s390x.s @@ -506,7 +506,7 @@ CALLFN(·call1073741824, 1073741824) TEXT callfnMVC<>(SB),NOSPLIT|NOFRAME,$0-0 MVC $1, 0(R4), 0(R6) -TEXT runtime·procyield(SB),NOSPLIT,$0-0 +TEXT runtime·procyieldAsm(SB),NOSPLIT,$0-0 RET // Save state of caller into g->sched, diff --git a/src/runtime/asm_wasm.s b/src/runtime/asm_wasm.s index 85aa52e0f79..c46cb4ae464 100644 --- a/src/runtime/asm_wasm.s +++ b/src/runtime/asm_wasm.s @@ -200,7 +200,7 @@ TEXT runtime·asminit(SB), NOSPLIT, $0-0 TEXT ·publicationBarrier(SB), NOSPLIT, $0-0 RET -TEXT runtime·procyield(SB), NOSPLIT, $0-0 // FIXME +TEXT runtime·procyieldAsm(SB), NOSPLIT, $0-0 // FIXME RET TEXT runtime·breakpoint(SB), NOSPLIT, $0-0 diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go index 18e1dc8bafc..a53fd6da340 100644 --- a/src/runtime/cgocall.go +++ b/src/runtime/cgocall.go @@ -33,7 +33,7 @@ // // To make it possible for gcc-compiled C code to call a Go function p.GoF, // cgo writes a gcc-compiled function named GoF (not p.GoF, since gcc doesn't -// know about packages). The gcc-compiled C function f calls GoF. +// know about packages). The gcc-compiled C function f calls GoF. // // GoF initializes "frame", a structure containing all of its // arguments and slots for p.GoF's results. It calls @@ -58,10 +58,10 @@ // m.g0 stack, so that it can be restored later. // // runtime.cgocallbackg (below) is now running on a real goroutine -// stack (not an m.g0 stack). First it calls runtime.exitsyscall, which will +// stack (not an m.g0 stack). First it calls runtime.exitsyscall, which will // block until the $GOMAXPROCS limit allows running this goroutine. // Once exitsyscall has returned, it is safe to do things like call the memory -// allocator or invoke the Go callback function. runtime.cgocallbackg +// allocator or invoke the Go callback function. runtime.cgocallbackg // first defers a function to unwind m.g0.sched.sp, so that if p.GoF // panics, m.g0.sched.sp will be restored to its old value: the m.g0 stack // and the m.curg stack will be unwound in lock step. 
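The cgocall.go comment edits above close out the description of how a C caller reaches an exported Go function through the cgo callback machinery. A minimal user-level sketch of that round trip; the C half appears in comments because a file using //export may not define C functions in its preamble, and all names here are illustrative:

```
// main.go
package main

/*
// The C half lives in a separate file (say callback.c):
//
//	void GoCallback(int);
//	void callFromC(void) { GoCallback(42); }
//
// Only the declaration belongs here.
void callFromC(void);
*/
import "C"
import "fmt"

//export GoCallback
func GoCallback(n C.int) {
	// Control arrives here via the callback path described
	// in the comment above.
	fmt.Println("called from C:", n)
}

func main() {
	C.callFromC()
}
```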
@@ -393,7 +393,7 @@ func cgocallbackg1(fn, frame unsafe.Pointer, ctxt uintptr) { // Now we need to set gp.cgoCtxt = s, but we could get // a SIGPROF signal while manipulating the slice, and // the SIGPROF handler could pick up gp.cgoCtxt while - // tracing up the stack. We need to ensure that the + // tracing up the stack. We need to ensure that the // handler always sees a valid slice, so set the // values in an order such that it always does. p := (*slice)(unsafe.Pointer(&gp.cgoCtxt)) @@ -591,15 +591,18 @@ func cgoCheckPointer(ptr any, arg any) { cgoCheckArg(t, ep.data, !t.IsDirectIface(), top, cgoCheckPointerFail) } -const cgoCheckPointerFail = "cgo argument has Go pointer to unpinned Go pointer" -const cgoResultFail = "cgo result is unpinned Go pointer or points to unpinned Go pointer" +type cgoErrorMsg int +const ( + cgoCheckPointerFail cgoErrorMsg = iota + cgoResultFail +) -// cgoCheckArg is the real work of cgoCheckPointer. The argument p -// is either a pointer to the value (of type t), or the value itself, -// depending on indir. The top parameter is whether we are at the top +// cgoCheckArg is the real work of cgoCheckPointer and cgoCheckResult. +// The argument p is either a pointer to the value (of type t), or the value +// itself, depending on indir. The top parameter is whether we are at the top // level, where Go pointers are allowed. Go pointers to pinned objects are // allowed as long as they don't reference other unpinned pointers. -func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) { +func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg cgoErrorMsg) { if !t.Pointers() || p == nil { // If the type has no pointers there is nothing to do. return @@ -625,7 +628,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) { // These types contain internal pointers that will // always be allocated in the Go heap. It's never OK // to pass them to C. - panic(errorString(msg)) + panic(cgoFormatErr(msg, t.Kind())) case abi.Func: if indir { p = *(*unsafe.Pointer)(p) @@ -633,7 +636,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) { if !cgoIsGoPointer(p) { return } - panic(errorString(msg)) + panic(cgoFormatErr(msg, t.Kind())) case abi.Interface: it := *(**_type)(p) if it == nil { @@ -643,14 +646,14 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) { // constant. A type not known at compile time will be // in the heap and will not be OK. 
if inheap(uintptr(unsafe.Pointer(it))) { - panic(errorString(msg)) + panic(cgoFormatErr(msg, t.Kind())) } p = *(*unsafe.Pointer)(add(p, goarch.PtrSize)) if !cgoIsGoPointer(p) { return } if !top && !isPinned(p) { - panic(errorString(msg)) + panic(cgoFormatErr(msg, t.Kind())) } cgoCheckArg(it, p, !it.IsDirectIface(), false, msg) case abi.Slice: @@ -661,7 +664,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) { return } if !top && !isPinned(p) { - panic(errorString(msg)) + panic(cgoFormatErr(msg, t.Kind())) } if !st.Elem.Pointers() { return @@ -676,7 +679,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) { return } if !top && !isPinned(ss.str) { - panic(errorString(msg)) + panic(cgoFormatErr(msg, t.Kind())) } case abi.Struct: st := (*structtype)(unsafe.Pointer(t)) @@ -705,7 +708,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) { return } if !top && !isPinned(p) { - panic(errorString(msg)) + panic(cgoFormatErr(msg, t.Kind())) } cgoCheckUnknownPointer(p, msg) @@ -716,7 +719,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) { // memory. It checks whether that Go memory contains any other // pointer into unpinned Go memory. If it does, we panic. // The return values are unused but useful to see in panic tracebacks. -func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base, i uintptr) { +func cgoCheckUnknownPointer(p unsafe.Pointer, msg cgoErrorMsg) (base, i uintptr) { if inheap(uintptr(p)) { b, span, _ := findObject(uintptr(p), 0, 0) base = b @@ -731,7 +734,7 @@ func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base, i uintptr) { } pp := *(*unsafe.Pointer)(unsafe.Pointer(addr)) if cgoIsGoPointer(pp) && !isPinned(pp) { - panic(errorString(msg)) + panic(cgoFormatErr(msg, abi.Pointer)) } } return @@ -741,7 +744,7 @@ func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base, i uintptr) { if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) { // We have no way to know the size of the object. // We have to assume that it might contain a pointer. - panic(errorString(msg)) + panic(cgoFormatErr(msg, abi.Pointer)) } // In the text or noptr sections, we know that the // pointer does not point to a Go pointer. @@ -794,3 +797,72 @@ func cgoCheckResult(val any) { t := ep._type cgoCheckArg(t, ep.data, !t.IsDirectIface(), false, cgoResultFail) } + +// cgoFormatErr is called by cgoCheckArg and cgoCheckUnknownPointer +// to format panic error messages. +func cgoFormatErr(error cgoErrorMsg, kind abi.Kind) errorString { + var msg, kindname string + var cgoFunction string = "unknown" + var offset int + var buf [20]byte + + // We expect one of these abi.Kind from cgoCheckArg + switch kind { + case abi.Chan: + kindname = "channel" + case abi.Func: + kindname = "function" + case abi.Interface: + kindname = "interface" + case abi.Map: + kindname = "map" + case abi.Pointer: + kindname = "pointer" + case abi.Slice: + kindname = "slice" + case abi.String: + kindname = "string" + case abi.Struct: + kindname = "struct" + case abi.UnsafePointer: + kindname = "unsafe pointer" + default: + kindname = "pointer" + } + + // The cgo function name might need an offset to be obtained + if error == cgoResultFail { + offset = 21 + } + + // Relatively to cgoFormatErr, this is the stack frame: + // 0. cgoFormatErr + // 1. cgoCheckArg or cgoCheckUnknownPointer + // 2. cgoCheckPointer or cgoCheckResult + // 3. 
cgo function + pc, path, line, ok := Caller(3) + if ok && error == cgoResultFail { + function := FuncForPC(pc) + + if function != nil { + // Expected format of cgo function name: + // - caller: _cgoexp_3c910ddb72c4_foo + if offset > len(function.Name()) { + cgoFunction = function.Name() + } else { + cgoFunction = function.Name()[offset:] + } + } + } + + switch error { + case cgoResultFail: + msg = path + ":" + string(itoa(buf[:], uint64(line))) + msg += ": result of Go function " + cgoFunction + " called from cgo" + msg += " is unpinned Go " + kindname + " or points to unpinned Go " + kindname + case cgoCheckPointerFail: + msg += "argument of cgo function has Go pointer to unpinned Go " + kindname + } + + return errorString(msg) +} diff --git a/src/runtime/cgocheck.go b/src/runtime/cgocheck.go index ab804a5a36e..81300adfcf1 100644 --- a/src/runtime/cgocheck.go +++ b/src/runtime/cgocheck.go @@ -177,27 +177,3 @@ func cgoCheckBits(src unsafe.Pointer, gcbits *byte, off, size uintptr) { } } } - -// cgoCheckUsingType is like cgoCheckTypedBlock, but is a last ditch -// fall back to look for pointers in src using the type information. -// We only use this when looking at a value on the stack when the type -// uses a GC program, because otherwise it's more efficient to use the -// GC bits. This is called on the system stack. -// -//go:nowritebarrier -//go:systemstack -func cgoCheckUsingType(typ *_type, src unsafe.Pointer, off, size uintptr) { - if !typ.Pointers() { - return - } - - // Anything past typ.PtrBytes is not a pointer. - if typ.PtrBytes <= off { - return - } - if ptrdataSize := typ.PtrBytes - off; size > ptrdataSize { - size = ptrdataSize - } - - cgoCheckBits(src, getGCMask(typ), off, size) -} diff --git a/src/runtime/chan_test.go b/src/runtime/chan_test.go index 526d45bb430..5a1ca52a8c3 100644 --- a/src/runtime/chan_test.go +++ b/src/runtime/chan_test.go @@ -309,7 +309,6 @@ func TestSelfSelect(t *testing.T) { wg.Add(2) c := make(chan int, chanCap) for p := 0; p < 2; p++ { - p := p go func() { defer wg.Done() for i := 0; i < 1000; i++ { @@ -359,7 +358,6 @@ func TestSelectStress(t *testing.T) { var wg sync.WaitGroup wg.Add(10) for k := 0; k < 4; k++ { - k := k go func() { for i := 0; i < N; i++ { c[k] <- 0 diff --git a/src/runtime/checkptr_test.go b/src/runtime/checkptr_test.go index 119708be7f5..d08b0524499 100644 --- a/src/runtime/checkptr_test.go +++ b/src/runtime/checkptr_test.go @@ -45,7 +45,6 @@ func TestCheckPtr(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.cmd, func(t *testing.T) { t.Parallel() got, err := testenv.CleanCmdEnv(exec.Command(exe, tc.cmd)).CombinedOutput() @@ -88,7 +87,6 @@ func TestCheckPtr2(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.cmd, func(t *testing.T) { t.Parallel() got, err := testenv.CleanCmdEnv(exec.Command(exe, tc.cmd)).CombinedOutput() diff --git a/src/runtime/crash_cgo_test.go b/src/runtime/crash_cgo_test.go index b77ff8dafdd..baf4523a7ae 100644 --- a/src/runtime/crash_cgo_test.go +++ b/src/runtime/crash_cgo_test.go @@ -752,8 +752,6 @@ func TestSegv(t *testing.T) { } for _, test := range []string{"Segv", "SegvInCgo", "TgkillSegv", "TgkillSegvInCgo"} { - test := test - // The tgkill variants only run on Linux. 
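Looking ahead to the defs_* timespec changes just below: they replace the runtime's timediv helper with direct 64-bit division and modulus. A minimal sketch of the split they compute, assuming a non-negative ns as at these call sites:

```
package main

import "fmt"

// splitNsec mirrors the setNsec pattern: seconds via truncating
// division, the remainder as nanoseconds.
func splitNsec(ns int64) (sec int64, nsec int32) {
	return ns / 1e9, int32(ns % 1e9)
}

func main() {
	sec, nsec := splitNsec(2_500_000_000) // 2.5 seconds
	fmt.Println(sec, nsec)                // 2 500000000
}
```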
if runtime.GOOS != "linux" && strings.HasPrefix(test, "Tgkill") { continue diff --git a/src/runtime/defer_test.go b/src/runtime/defer_test.go index e3d0d077685..5431c511342 100644 --- a/src/runtime/defer_test.go +++ b/src/runtime/defer_test.go @@ -150,7 +150,7 @@ func TestAbortedPanic(t *testing.T) { // This tests that recover() does not succeed unless it is called directly from a // defer function that is directly called by the panic. Here, we first call it // from a defer function that is created by the defer function called directly by -// the panic. In +// the panic. func TestRecoverMatching(t *testing.T) { defer func() { r := recover() diff --git a/src/runtime/defs1_netbsd_386.go b/src/runtime/defs1_netbsd_386.go index 16c55def926..cb4d6f1cf14 100644 --- a/src/runtime/defs1_netbsd_386.go +++ b/src/runtime/defs1_netbsd_386.go @@ -121,7 +121,8 @@ type timespec struct { //go:nosplit func (ts *timespec) setNsec(ns int64) { - ts.tv_sec = int64(timediv(ns, 1e9, &ts.tv_nsec)) + ts.tv_sec = int64(ns / 1e9) + ts.tv_nsec = int32(ns % 1e9) } type timeval struct { diff --git a/src/runtime/defs1_netbsd_arm.go b/src/runtime/defs1_netbsd_arm.go index 77a59d4a05b..d31fcd471d9 100644 --- a/src/runtime/defs1_netbsd_arm.go +++ b/src/runtime/defs1_netbsd_arm.go @@ -123,7 +123,8 @@ type timespec struct { //go:nosplit func (ts *timespec) setNsec(ns int64) { - ts.tv_sec = int64(timediv(ns, 1e9, &ts.tv_nsec)) + ts.tv_sec = int64(ns / 1e9) + ts.tv_nsec = int32(ns % 1e9) } type timeval struct { diff --git a/src/runtime/defs_freebsd_386.go b/src/runtime/defs_freebsd_386.go index 42a0faf74dc..20ac643ad7a 100644 --- a/src/runtime/defs_freebsd_386.go +++ b/src/runtime/defs_freebsd_386.go @@ -210,7 +210,8 @@ type timespec struct { //go:nosplit func (ts *timespec) setNsec(ns int64) { - ts.tv_sec = timediv(ns, 1e9, &ts.tv_nsec) + ts.tv_sec = int32(ns / 1e9) + ts.tv_nsec = int32(ns % 1e9) } type timeval struct { diff --git a/src/runtime/defs_freebsd_arm.go b/src/runtime/defs_freebsd_arm.go index dbb54da51bd..cf61fcade56 100644 --- a/src/runtime/defs_freebsd_arm.go +++ b/src/runtime/defs_freebsd_arm.go @@ -182,7 +182,8 @@ type timespec struct { //go:nosplit func (ts *timespec) setNsec(ns int64) { - ts.tv_sec = int64(timediv(ns, 1e9, &ts.tv_nsec)) + ts.tv_sec = int64(ns / 1e9) + ts.tv_nsec = int32(ns % 1e9) } type timeval struct { diff --git a/src/runtime/defs_linux_386.go b/src/runtime/defs_linux_386.go index e902d8175c3..d1875954f31 100644 --- a/src/runtime/defs_linux_386.go +++ b/src/runtime/defs_linux_386.go @@ -146,7 +146,8 @@ type timespec32 struct { //go:nosplit func (ts *timespec32) setNsec(ns int64) { - ts.tv_sec = timediv(ns, 1e9, &ts.tv_nsec) + ts.tv_sec = int32(ns / 1e9) + ts.tv_nsec = int32(ns % 1e9) } type timespec struct { @@ -156,9 +157,8 @@ type timespec struct { //go:nosplit func (ts *timespec) setNsec(ns int64) { - var newNS int32 - ts.tv_sec = int64(timediv(ns, 1e9, &newNS)) - ts.tv_nsec = int64(newNS) + ts.tv_sec = int64(ns / 1e9) + ts.tv_nsec = int64(ns % 1e9) } type timeval struct { @@ -237,10 +237,14 @@ type ucontext struct { uc_sigmask uint32 } -type itimerspec struct { +type itimerspec32 struct { it_interval timespec32 it_value timespec32 } +type itimerspec struct { + it_interval timespec + it_value timespec +} type itimerval struct { it_interval timeval diff --git a/src/runtime/defs_linux_arm.go b/src/runtime/defs_linux_arm.go index 35c4faf9640..94577fc5971 100644 --- a/src/runtime/defs_linux_arm.go +++ b/src/runtime/defs_linux_arm.go @@ -105,7 +105,8 @@ type timespec32 struct { //go:nosplit 
func (ts *timespec32) setNsec(ns int64) { - ts.tv_sec = timediv(ns, 1e9, &ts.tv_nsec) + ts.tv_sec = int32(ns / 1e9) + ts.tv_nsec = int32(ns % 1e9) } type timespec struct { @@ -115,9 +116,8 @@ type timespec struct { //go:nosplit func (ts *timespec) setNsec(ns int64) { - var newNS int32 - ts.tv_sec = int64(timediv(ns, 1e9, &newNS)) - ts.tv_nsec = int64(newNS) + ts.tv_sec = int64(ns / 1e9) + ts.tv_nsec = int64(ns % 1e9) } type stackt struct { @@ -169,11 +169,16 @@ func (tv *timeval) set_usec(x int32) { tv.tv_usec = x } -type itimerspec struct { +type itimerspec32 struct { it_interval timespec32 it_value timespec32 } +type itimerspec struct { + it_interval timespec + it_value timespec +} + type itimerval struct { it_interval timeval it_value timeval diff --git a/src/runtime/defs_linux_mipsx.go b/src/runtime/defs_linux_mipsx.go index cec504c8856..5a446e0595f 100644 --- a/src/runtime/defs_linux_mipsx.go +++ b/src/runtime/defs_linux_mipsx.go @@ -103,7 +103,8 @@ type timespec32 struct { //go:nosplit func (ts *timespec32) setNsec(ns int64) { - ts.tv_sec = timediv(ns, 1e9, &ts.tv_nsec) + ts.tv_sec = int32(ns / 1e9) + ts.tv_nsec = int32(ns % 1e9) } type timespec struct { @@ -113,9 +114,8 @@ type timespec struct { //go:nosplit func (ts *timespec) setNsec(ns int64) { - var newNS int32 - ts.tv_sec = int64(timediv(ns, 1e9, &newNS)) - ts.tv_nsec = int64(newNS) + ts.tv_sec = int64(ns / 1e9) + ts.tv_nsec = int64(ns % 1e9) } type timeval struct { @@ -152,11 +152,16 @@ type siginfo struct { _ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte } -type itimerspec struct { +type itimerspec32 struct { it_interval timespec32 it_value timespec32 } +type itimerspec struct { + it_interval timespec + it_value timespec +} + type itimerval struct { it_interval timeval it_value timeval diff --git a/src/runtime/defs_openbsd_386.go b/src/runtime/defs_openbsd_386.go index 996745f6f8e..c77b5e16cb6 100644 --- a/src/runtime/defs_openbsd_386.go +++ b/src/runtime/defs_openbsd_386.go @@ -147,7 +147,8 @@ type timespec struct { //go:nosplit func (ts *timespec) setNsec(ns int64) { - ts.tv_sec = int64(timediv(ns, 1e9, &ts.tv_nsec)) + ts.tv_sec = int64(ns / 1e9) + ts.tv_nsec = int32(ns % 1e9) } type timeval struct { diff --git a/src/runtime/defs_openbsd_arm.go b/src/runtime/defs_openbsd_arm.go index cdda6b4ad1a..5393ea4eeba 100644 --- a/src/runtime/defs_openbsd_arm.go +++ b/src/runtime/defs_openbsd_arm.go @@ -152,7 +152,8 @@ type timespec struct { //go:nosplit func (ts *timespec) setNsec(ns int64) { - ts.tv_sec = int64(timediv(ns, 1e9, &ts.tv_nsec)) + ts.tv_sec = int64(ns / 1e9) + ts.tv_nsec = int32(ns % 1e9) } type timeval struct { diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go index ae222631722..3a781b7551f 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -1472,8 +1472,6 @@ func Releasem() { releasem(getg().m) } -var Timediv = timediv - type PIController struct { piController } @@ -1942,3 +1940,51 @@ var X86HasAVX = &x86HasAVX var DebugDecorateMappings = &debug.decoratemappings func SetVMANameSupported() bool { return setVMANameSupported() } + +type ListHead struct { + l listHead +} + +func (head *ListHead) Init(off uintptr) { + head.l.init(off) +} + +type ListNode struct { + l listNode +} + +func (head *ListHead) Push(p unsafe.Pointer) { + head.l.push(p) +} + +func (head *ListHead) Pop() unsafe.Pointer { + return head.l.pop() +} + +func (head *ListHead) Remove(p unsafe.Pointer) { + head.l.remove(p) +} + +type ListHeadManual struct { + l listHeadManual +} + +func (head 
*ListHeadManual) Init(off uintptr) { + head.l.init(off) +} + +type ListNodeManual struct { + l listNodeManual +} + +func (head *ListHeadManual) Push(p unsafe.Pointer) { + head.l.push(p) +} + +func (head *ListHeadManual) Pop() unsafe.Pointer { + return head.l.pop() +} + +func (head *ListHeadManual) Remove(p unsafe.Pointer) { + head.l.remove(p) +} diff --git a/src/runtime/extern.go b/src/runtime/extern.go index 62dab74bc0c..a9f97a1a045 100644 --- a/src/runtime/extern.go +++ b/src/runtime/extern.go @@ -171,13 +171,6 @@ It is a comma-separated list of name=val pairs setting these named variables: silently default to 1024. Future versions of Go may remove this limitation and extend profstackdepth to apply to the CPU profiler and execution tracer. - pagetrace: setting pagetrace=/path/to/file will write out a trace of page events - that can be viewed, analyzed, and visualized using the x/debug/cmd/pagetrace tool. - Build your program with GOEXPERIMENT=pagetrace to enable this functionality. Do not - enable this functionality if your program is a setuid binary as it introduces a security - risk in that scenario. Currently not supported on Windows, plan9 or js/wasm. Setting this - option for some applications can produce large traces, so use with care. - panicnil: setting panicnil=1 disables the runtime error when calling panic with nil interface value or an untyped nil. @@ -218,10 +211,11 @@ It is a comma-separated list of name=val pairs setting these named variables: report. This also extends the information returned by runtime.Stack. Setting N to 0 will report no ancestry information. - tracefpunwindoff: setting tracefpunwindoff=1 forces the execution tracer to - use the runtime's default stack unwinder instead of frame pointer unwinding. - This increases tracer overhead, but could be helpful as a workaround or for - debugging unexpected regressions caused by frame pointer unwinding. + tracefpunwindoff: setting tracefpunwindoff=1 forces the execution tracer + and block and mutex profilers to use the runtime's default stack + unwinder instead of frame pointer unwinding. This increases their + overhead, but could be helpful as a workaround or for debugging + unexpected regressions caused by frame pointer unwinding. traceadvanceperiod: the approximate period in nanoseconds between trace generations. Only applies if a program is built with GOEXPERIMENT=exectracer2. Used primarily for testing diff --git a/src/runtime/funcdata.h b/src/runtime/funcdata.h index 4bbc58ea488..236f3a3369d 100644 --- a/src/runtime/funcdata.h +++ b/src/runtime/funcdata.h @@ -12,6 +12,7 @@ #define PCDATA_StackMapIndex 1 #define PCDATA_InlTreeIndex 2 #define PCDATA_ArgLiveIndex 3 +#define PCDATA_PanicBounds 4 #define FUNCDATA_ArgsPointerMaps 0 /* garbage collector blocks */ #define FUNCDATA_LocalsPointerMaps 1 diff --git a/src/runtime/goroutineleakprofile_test.go b/src/runtime/goroutineleakprofile_test.go index 6e26bcab132..f5d2dd6372e 100644 --- a/src/runtime/goroutineleakprofile_test.go +++ b/src/runtime/goroutineleakprofile_test.go @@ -487,7 +487,7 @@ func TestGoroutineLeakProfile(t *testing.T) { testCases = append(testCases, patternTestCases...) // Test cases must not panic or cause fatal exceptions. - failStates := regexp.MustCompile(`fatal|panic`) + failStates := regexp.MustCompile(`fatal|panic|DATA RACE`) testApp := func(exepath string, testCases []testCase) { @@ -520,9 +520,9 @@ func TestGoroutineLeakProfile(t *testing.T) { t.Errorf("Test %s produced no output. 
Is the goroutine leak profile collected?", tcase.name) } - // Zero tolerance policy for fatal exceptions or panics. + // Zero tolerance policy for fatal exceptions, panics, or data races. if failStates.MatchString(runOutput) { - t.Errorf("unexpected fatal exception or panic!\noutput:\n%s\n\n", runOutput) + t.Errorf("unexpected fatal exception or panic\noutput:\n%s\n\n", runOutput) } output += runOutput + "\n\n" @@ -540,7 +540,7 @@ func TestGoroutineLeakProfile(t *testing.T) { unexpectedLeaks := make([]string, 0, len(foundLeaks)) // Parse every leak and check if it is expected (maybe as a flaky leak). - LEAKS: + leaks: for _, leak := range foundLeaks { // Check if the leak is expected. // If it is, check whether it has been encountered before. @@ -569,7 +569,7 @@ func TestGoroutineLeakProfile(t *testing.T) { for flakyLeak := range tcase.flakyLeaks { if flakyLeak.MatchString(leak) { // The leak is flaky. Carry on to the next line. - continue LEAKS + continue leaks } } diff --git a/src/runtime/hash32.go b/src/runtime/hash32.go index 0616c7dd050..206a308f12e 100644 --- a/src/runtime/hash32.go +++ b/src/runtime/hash32.go @@ -5,7 +5,7 @@ // Hashing algorithm inspired by // wyhash: https://github.com/wangyi-fudan/wyhash/blob/ceb019b530e2c1c14d70b79bfa2bc49de7d95bc1/Modern%20Non-Cryptographic%20Hash%20Function%20and%20Pseudorandom%20Number%20Generator.pdf -//go:build 386 || arm || mips || mipsle +//go:build 386 || arm || mips || mipsle || wasm || (gccgo && (ppc || s390)) package runtime diff --git a/src/runtime/hash64.go b/src/runtime/hash64.go index 124bb7d77ac..ac26e660c6d 100644 --- a/src/runtime/hash64.go +++ b/src/runtime/hash64.go @@ -5,7 +5,7 @@ // Hashing algorithm inspired by // wyhash: https://github.com/wangyi-fudan/wyhash -//go:build amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || wasm +//go:build amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x package runtime diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go index d9474034c27..3671c65ab73 100644 --- a/src/runtime/heapdump.go +++ b/src/runtime/heapdump.go @@ -412,10 +412,18 @@ func dumpgs() { forEachG(func(gp *g) { status := readgstatus(gp) // The world is stopped so gp will not be in a scan state. switch status { + case _Grunning: + // Dump goroutine if it's _Grunning only during a syscall. This is safe + // because the goroutine will just park without mutating its stack, since + // the world is stopped. + if gp.syscallsp != 0 { + dumpgoroutine(gp) + } + fallthrough default: print("runtime: unexpected G.status ", hex(status), "\n") throw("dumpgs in STW - bad status") - case _Gdead: + case _Gdead, _Gdeadextra: // ok case _Grunnable, _Gsyscall, diff --git a/src/runtime/iface_test.go b/src/runtime/iface_test.go index 06f6eeb9524..5bc209cfcf0 100644 --- a/src/runtime/iface_test.go +++ b/src/runtime/iface_test.go @@ -60,7 +60,7 @@ func TestCmpIfaceConcreteAlloc(t *testing.T) { t.Skip("skipping on non-gc compiler") } - n := testing.AllocsPerRun(1, func() { + n := testing.AllocsPerRun(100, func() { _ = e == ts _ = i1 == ts _ = e == 1 diff --git a/src/runtime/lfstack.go b/src/runtime/lfstack.go index 8946c803485..1e2f5a29654 100644 --- a/src/runtime/lfstack.go +++ b/src/runtime/lfstack.go @@ -34,6 +34,11 @@ func (head *lfstack) push(node *lfnode) { } func (head *lfstack) pop() unsafe.Pointer { + var backoff uint32 + // TODO: tweak backoff parameters on other architectures. 
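An aside before the rest of this pop change: the idea is bounded spinning that grows after each failed CAS, so contending threads spread out instead of retrying in lock step. A standalone sketch of the same idea using the public sync/atomic API; the initial budget and growth factor are illustrative, not the runtime's tuning:

```
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// addWithBackoff retries a CAS, spinning longer after each failure.
func addWithBackoff(addr *uint64, delta uint64) {
	backoff := uint32(16) // illustrative initial budget
	for {
		old := atomic.LoadUint64(addr)
		if atomic.CompareAndSwapUint64(addr, old, old+delta) {
			return
		}
		for i := uint32(0); i < backoff; i++ {
			// Stand-in for procyield: idle briefly before retrying.
		}
		backoff += backoff / 2 // grow the wait ~1.5x per failure
	}
}

func main() {
	var n uint64
	var wg sync.WaitGroup
	for g := 0; g < 8; g++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 1000; i++ {
				addWithBackoff(&n, 1)
			}
		}()
	}
	wg.Wait()
	fmt.Println(n) // always 8000
}
```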
+ if GOARCH == "arm64" { + backoff = 128 + } for { old := atomic.Load64((*uint64)(head)) if old == 0 { @@ -44,6 +49,16 @@ func (head *lfstack) pop() unsafe.Pointer { if atomic.Cas64((*uint64)(head), old, next) { return unsafe.Pointer(node) } + + // Use a backoff approach to reduce demand to the shared memory location + // decreases memory contention and allows for other threads to make quicker + // progress. + // Read more in this Arm blog post: + // https://community.arm.com/arm-community-blogs/b/architectures-and-processors-blog/posts/multi-threaded-applications-arm + procyield(backoff) + // Increase backoff time. + backoff += backoff / 2 + } } diff --git a/src/runtime/list.go b/src/runtime/list.go new file mode 100644 index 00000000000..c900ad7ff3f --- /dev/null +++ b/src/runtime/list.go @@ -0,0 +1,136 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "unsafe" +) + +// listHead points to the head of an intrusive doubly-linked list. +// +// Prior to use, you must call init to store the offset of listNode fields. +// +// Every object in the list should be the same type. +type listHead struct { + obj unsafe.Pointer + + initialized bool + nodeOffset uintptr +} + +// init initializes the list head. off is the offset (via unsafe.Offsetof) of +// the listNode field in the objects in the list. +func (head *listHead) init(off uintptr) { + head.initialized = true + head.nodeOffset = off +} + +// listNode is the linked list node for objects in a listHead list. +// +// listNode must be stored as a field in objects placed in the linked list. The +// offset of the field is registered via listHead.init. +// +// For example: +// +// type foo struct { +// val int +// +// node listNode +// } +// +// var fooHead listHead +// fooHead.init(unsafe.Offsetof(foo{}.node)) +type listNode struct { + prev unsafe.Pointer + next unsafe.Pointer +} + +func (head *listHead) getNode(p unsafe.Pointer) *listNode { + if !head.initialized { + throw("runtime: uninitialized listHead") + } + + if p == nil { + return nil + } + return (*listNode)(unsafe.Add(p, head.nodeOffset)) +} + +// Returns true if the list is empty. +func (head *listHead) empty() bool { + return head.obj == nil +} + +// Returns the head of the list without removing it. +func (head *listHead) head() unsafe.Pointer { + return head.obj +} + +// Push p onto the front of the list. +func (head *listHead) push(p unsafe.Pointer) { + // p becomes the head of the list. + + // ... so p's next is the current head. + pNode := head.getNode(p) + pNode.next = head.obj + + // ... and the current head's prev is p. + if head.obj != nil { + headNode := head.getNode(head.obj) + headNode.prev = p + } + + head.obj = p +} + +// Pop removes the head of the list. +func (head *listHead) pop() unsafe.Pointer { + if head.obj == nil { + return nil + } + + // Return the head of the list. + p := head.obj + + // ... so the new head is p's next. + pNode := head.getNode(p) + head.obj = pNode.next + // p is no longer on the list. Clear next to remove unused references. + // N.B. as the head, prev must already be nil. + pNode.next = nil + + // ... and the new head no longer has a prev. + if head.obj != nil { + headNode := head.getNode(head.obj) + headNode.prev = nil + } + + return p +} + +// Remove p from the middle of the list. 
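// A usage sketch tying init, push, and pop together, following the foo
// example in the listNode doc comment above (order is LIFO):
//
//	var fooHead listHead
//	fooHead.init(unsafe.Offsetof(foo{}.node))
//
//	a, b := &foo{val: 1}, &foo{val: 2}
//	fooHead.push(unsafe.Pointer(a))
//	fooHead.push(unsafe.Pointer(b))
//
//	first := (*foo)(fooHead.pop())  // first == b: the most recent push pops first
//	second := (*foo)(fooHead.pop()) // second == a
//	// fooHead.pop() now returns nil: the list is empty.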
+func (head *listHead) remove(p unsafe.Pointer) { + if head.obj == p { + // Use pop to ensure head is updated when removing the head. + head.pop() + return + } + + pNode := head.getNode(p) + prevNode := head.getNode(pNode.prev) + nextNode := head.getNode(pNode.next) + + // Link prev to next. + if prevNode != nil { + prevNode.next = pNode.next + } + // Link next to prev. + if nextNode != nil { + nextNode.prev = pNode.prev + } + + pNode.prev = nil + pNode.next = nil +} diff --git a/src/runtime/list_manual.go b/src/runtime/list_manual.go new file mode 100644 index 00000000000..af0ae6b2d65 --- /dev/null +++ b/src/runtime/list_manual.go @@ -0,0 +1,143 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "unsafe" +) + +// The types in this file are exact copies of the types in list.go, but with +// unsafe.Pointer replaced with uintptr for use where write barriers must be +// avoided, such as uses of muintptr, puintptr, guintptr. +// +// Objects in these lists must be kept alive via another real reference. + +// listHeadManual points to the head of an intrusive doubly-linked list of +// objects. +// +// Prior to use, you must call init to store the offset of listNodeManual fields. +// +// Every object in the list should be the same type. +type listHeadManual struct { + obj uintptr + + initialized bool + nodeOffset uintptr +} + +// init initializes the list head. off is the offset (via unsafe.Offsetof) of +// the listNodeManual field in the objects in the list. +func (head *listHeadManual) init(off uintptr) { + head.initialized = true + head.nodeOffset = off +} + +// listNodeManual is the linked list node for objects in a listHeadManual list. +// +// listNodeManual must be stored as a field in objects placed in the linked list. +// The offset of the field is registered via listHeadManual.init. +// +// For example: +// +// type foo struct { +// val int +// +// node listNodeManual +// } +// +// var fooHead listHeadManual +// fooHead.init(unsafe.Offsetof(foo{}.node)) +type listNodeManual struct { + prev uintptr + next uintptr +} + +func (head *listHeadManual) getNode(p unsafe.Pointer) *listNodeManual { + if !head.initialized { + throw("runtime: uninitialized listHead") + } + + if p == nil { + return nil + } + return (*listNodeManual)(unsafe.Add(p, head.nodeOffset)) +} + +// Returns true if the list is empty. +func (head *listHeadManual) empty() bool { + return head.obj == 0 +} + +// Returns the head of the list without removing it. +func (head *listHeadManual) head() unsafe.Pointer { + return unsafe.Pointer(head.obj) +} + +// Push p onto the front of the list. +func (head *listHeadManual) push(p unsafe.Pointer) { + // p becomes the head of the list. + + // ... so p's next is the current head. + pNode := head.getNode(p) + pNode.next = head.obj + + // ... and the current head's prev is p. + if head.obj != 0 { + headNode := head.getNode(unsafe.Pointer(head.obj)) + headNode.prev = uintptr(p) + } + + head.obj = uintptr(p) +} + +// Pop removes the head of the list. +func (head *listHeadManual) pop() unsafe.Pointer { + if head.obj == 0 { + return nil + } + + // Return the head of the list. + p := unsafe.Pointer(head.obj) + + // ... so the new head is p's next. + pNode := head.getNode(p) + head.obj = pNode.next + // p is no longer on the list. Clear next to remove unused references. + // N.B. as the head, prev must already be nil. + pNode.next = 0 + + // ... 
and the new head no longer has a prev. + if head.obj != 0 { + headNode := head.getNode(unsafe.Pointer(head.obj)) + headNode.prev = 0 + } + + return p +} + +// Remove p from the middle of the list. +func (head *listHeadManual) remove(p unsafe.Pointer) { + if unsafe.Pointer(head.obj) == p { + // Use pop to ensure head is updated when removing the head. + head.pop() + return + } + + pNode := head.getNode(p) + prevNode := head.getNode(unsafe.Pointer(pNode.prev)) + nextNode := head.getNode(unsafe.Pointer(pNode.next)) + + // Link prev to next. + if prevNode != nil { + prevNode.next = pNode.next + } + // Link next to prev. + if nextNode != nil { + nextNode.prev = pNode.prev + } + + pNode.prev = 0 + pNode.next = 0 +} diff --git a/src/runtime/list_manual_test.go b/src/runtime/list_manual_test.go new file mode 100644 index 00000000000..f0b64b48ec7 --- /dev/null +++ b/src/runtime/list_manual_test.go @@ -0,0 +1,416 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "internal/runtime/sys" + "runtime" + "testing" + "unsafe" +) + +// The tests in this file are identical to list_test.go, but for the +// manually-managed variants. + +type listedValManual struct { + val int + + aNode runtime.ListNodeManual + bNode runtime.ListNodeManual +} + +// ListHeadManual is intended to be used with objects where the lifetime of the +// object is managed explicitly, so it is OK to store as uintptr. +// +// This means that our test values must outlive the test, and must not live on +// the stack (which may move). +var allListedValManual []*listedValManual + +func newListedValManual(v int) *listedValManual { + lv := &listedValManual{ + val: v, + } + allListedValManual = append(allListedValManual, lv) + return lv +} + +func TestListManualPush(t *testing.T) { + var headA runtime.ListHeadManual + headA.Init(unsafe.Offsetof(listedValManual{}.aNode)) + + one := newListedValManual(1) + headA.Push(unsafe.Pointer(one)) + + two := newListedValManual(2) + headA.Push(unsafe.Pointer(two)) + + three := newListedValManual(3) + headA.Push(unsafe.Pointer(three)) + + p := headA.Pop() + v := (*listedValManual)(p) + if v == nil { + t.Fatalf("pop got nil want 3") + } + if v.val != 3 { + t.Errorf("pop got %d want 3", v.val) + } + + p = headA.Pop() + v = (*listedValManual)(p) + if v == nil { + t.Fatalf("pop got nil want 2") + } + if v.val != 2 { + t.Errorf("pop got %d want 2", v.val) + } + + p = headA.Pop() + v = (*listedValManual)(p) + if v == nil { + t.Fatalf("pop got nil want 1") + } + if v.val != 1 { + t.Errorf("pop got %d want 1", v.val) + } + + p = headA.Pop() + v = (*listedValManual)(p) + if v != nil { + t.Fatalf("pop got %+v want nil", v) + } + + runtime.KeepAlive(one) + runtime.KeepAlive(two) + runtime.KeepAlive(three) +} + +func wantValManual(t *testing.T, v *listedValManual, i int) { + t.Helper() + if v == nil { + t.Fatalf("listedVal got nil want %d", i) + } + if v.val != i { + t.Errorf("pop got %d want %d", v.val, i) + } +} + +func TestListManualRemoveHead(t *testing.T) { + var headA runtime.ListHeadManual + headA.Init(unsafe.Offsetof(listedValManual{}.aNode)) + + one := newListedValManual(1) + headA.Push(unsafe.Pointer(one)) + + two := newListedValManual(2) + headA.Push(unsafe.Pointer(two)) + + three := newListedValManual(3) + headA.Push(unsafe.Pointer(three)) + + headA.Remove(unsafe.Pointer(three)) + + p := headA.Pop() + v := (*listedValManual)(p) + wantValManual(t, v, 2) + + p = 
headA.Pop() + v = (*listedValManual)(p) + wantValManual(t, v, 1) + + p = headA.Pop() + v = (*listedValManual)(p) + if v != nil { + t.Fatalf("pop got %+v want nil", v) + } + + runtime.KeepAlive(one) + runtime.KeepAlive(two) + runtime.KeepAlive(three) +} + +func TestListManualRemoveMiddle(t *testing.T) { + var headA runtime.ListHeadManual + headA.Init(unsafe.Offsetof(listedValManual{}.aNode)) + + one := newListedValManual(1) + headA.Push(unsafe.Pointer(one)) + + two := newListedValManual(2) + headA.Push(unsafe.Pointer(two)) + + three := newListedValManual(3) + headA.Push(unsafe.Pointer(three)) + + headA.Remove(unsafe.Pointer(two)) + + p := headA.Pop() + v := (*listedValManual)(p) + wantValManual(t, v, 3) + + p = headA.Pop() + v = (*listedValManual)(p) + wantValManual(t, v, 1) + + p = headA.Pop() + v = (*listedValManual)(p) + if v != nil { + t.Fatalf("pop got %+v want nil", v) + } + + runtime.KeepAlive(one) + runtime.KeepAlive(two) + runtime.KeepAlive(three) +} + +func TestListManualRemoveTail(t *testing.T) { + var headA runtime.ListHeadManual + headA.Init(unsafe.Offsetof(listedValManual{}.aNode)) + + one := newListedValManual(1) + headA.Push(unsafe.Pointer(one)) + + two := newListedValManual(2) + headA.Push(unsafe.Pointer(two)) + + three := newListedValManual(3) + headA.Push(unsafe.Pointer(three)) + + headA.Remove(unsafe.Pointer(one)) + + p := headA.Pop() + v := (*listedValManual)(p) + wantValManual(t, v, 3) + + p = headA.Pop() + v = (*listedValManual)(p) + wantValManual(t, v, 2) + + p = headA.Pop() + v = (*listedValManual)(p) + if v != nil { + t.Fatalf("pop got %+v want nil", v) + } + + runtime.KeepAlive(one) + runtime.KeepAlive(two) + runtime.KeepAlive(three) +} + +func TestListManualRemoveAll(t *testing.T) { + var headA runtime.ListHeadManual + headA.Init(unsafe.Offsetof(listedValManual{}.aNode)) + + one := newListedValManual(1) + headA.Push(unsafe.Pointer(one)) + + two := newListedValManual(2) + headA.Push(unsafe.Pointer(two)) + + three := newListedValManual(3) + headA.Push(unsafe.Pointer(three)) + + headA.Remove(unsafe.Pointer(one)) + headA.Remove(unsafe.Pointer(two)) + headA.Remove(unsafe.Pointer(three)) + + p := headA.Pop() + v := (*listedValManual)(p) + if v != nil { + t.Fatalf("pop got %+v want nil", v) + } + + runtime.KeepAlive(one) + runtime.KeepAlive(two) + runtime.KeepAlive(three) +} + +// The tests below are identical, but used with a sys.NotInHeap type. 
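// Why the KeepAlive calls above are load-bearing: the manual list stores its
// links as bare uintptrs, which the garbage collector does not trace, so each
// linked object needs an independent real reference for its whole time on the
// list. A sketch of the hazard if that rule is broken (hypothetical; the
// tests above deliberately avoid this by storing every value in
// allListedValManual and calling runtime.KeepAlive after the last use):
//
//	v := &listedValManual{val: 1} // no other reference kept
//	head.Push(unsafe.Pointer(v))
//	// The GC may reclaim *v here even though the list still records its
//	// address as a uintptr, so a later Pop would hand back a dangling
//	// pointer.
//	_ = head.Pop()
//	runtime.KeepAlive(v) // extends v's lifetime past the Pop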
+ +type listedValNIH struct { + _ sys.NotInHeap + listedValManual +} + +func newListedValNIH(v int) *listedValNIH { + l := (*listedValNIH)(runtime.PersistentAlloc(unsafe.Sizeof(listedValNIH{}), unsafe.Alignof(listedValNIH{}))) + l.val = v + return l +} + +func newListHeadNIH() *runtime.ListHeadManual { + return (*runtime.ListHeadManual)(runtime.PersistentAlloc(unsafe.Sizeof(runtime.ListHeadManual{}), unsafe.Alignof(runtime.ListHeadManual{}))) +} + +func TestListNIHPush(t *testing.T) { + headA := newListHeadNIH() + headA.Init(unsafe.Offsetof(listedValNIH{}.aNode)) + + one := newListedValNIH(1) + headA.Push(unsafe.Pointer(one)) + + two := newListedValNIH(2) + headA.Push(unsafe.Pointer(two)) + + three := newListedValNIH(3) + headA.Push(unsafe.Pointer(three)) + + p := headA.Pop() + v := (*listedValNIH)(p) + if v == nil { + t.Fatalf("pop got nil want 3") + } + if v.val != 3 { + t.Errorf("pop got %d want 3", v.val) + } + + p = headA.Pop() + v = (*listedValNIH)(p) + if v == nil { + t.Fatalf("pop got nil want 2") + } + if v.val != 2 { + t.Errorf("pop got %d want 2", v.val) + } + + p = headA.Pop() + v = (*listedValNIH)(p) + if v == nil { + t.Fatalf("pop got nil want 1") + } + if v.val != 1 { + t.Errorf("pop got %d want 1", v.val) + } + + p = headA.Pop() + v = (*listedValNIH)(p) + if v != nil { + t.Fatalf("pop got %+v want nil", v) + } +} + +func wantValNIH(t *testing.T, v *listedValNIH, i int) { + t.Helper() + if v == nil { + t.Fatalf("listedVal got nil want %d", i) + } + if v.val != i { + t.Errorf("pop got %d want %d", v.val, i) + } +} + +func TestListNIHRemoveHead(t *testing.T) { + headA := newListHeadNIH() + headA.Init(unsafe.Offsetof(listedValNIH{}.aNode)) + + one := newListedValNIH(1) + headA.Push(unsafe.Pointer(one)) + + two := newListedValNIH(2) + headA.Push(unsafe.Pointer(two)) + + three := newListedValNIH(3) + headA.Push(unsafe.Pointer(three)) + + headA.Remove(unsafe.Pointer(three)) + + p := headA.Pop() + v := (*listedValNIH)(p) + wantValNIH(t, v, 2) + + p = headA.Pop() + v = (*listedValNIH)(p) + wantValNIH(t, v, 1) + + p = headA.Pop() + v = (*listedValNIH)(p) + if v != nil { + t.Fatalf("pop got %+v want nil", v) + } +} + +func TestListNIHRemoveMiddle(t *testing.T) { + headA := newListHeadNIH() + headA.Init(unsafe.Offsetof(listedValNIH{}.aNode)) + + one := newListedValNIH(1) + headA.Push(unsafe.Pointer(one)) + + two := newListedValNIH(2) + headA.Push(unsafe.Pointer(two)) + + three := newListedValNIH(3) + headA.Push(unsafe.Pointer(three)) + + headA.Remove(unsafe.Pointer(two)) + + p := headA.Pop() + v := (*listedValNIH)(p) + wantValNIH(t, v, 3) + + p = headA.Pop() + v = (*listedValNIH)(p) + wantValNIH(t, v, 1) + + p = headA.Pop() + v = (*listedValNIH)(p) + if v != nil { + t.Fatalf("pop got %+v want nil", v) + } +} + +func TestListNIHRemoveTail(t *testing.T) { + headA := newListHeadNIH() + headA.Init(unsafe.Offsetof(listedValNIH{}.aNode)) + + one := newListedValNIH(1) + headA.Push(unsafe.Pointer(one)) + + two := newListedValNIH(2) + headA.Push(unsafe.Pointer(two)) + + three := newListedValNIH(3) + headA.Push(unsafe.Pointer(three)) + + headA.Remove(unsafe.Pointer(one)) + + p := headA.Pop() + v := (*listedValNIH)(p) + wantValNIH(t, v, 3) + + p = headA.Pop() + v = (*listedValNIH)(p) + wantValNIH(t, v, 2) + + p = headA.Pop() + v = (*listedValNIH)(p) + if v != nil { + t.Fatalf("pop got %+v want nil", v) + } +} + +func TestListNIHRemoveAll(t *testing.T) { + headA := newListHeadNIH() + headA.Init(unsafe.Offsetof(listedValNIH{}.aNode)) + + one := newListedValNIH(1) + headA.Push(unsafe.Pointer(one)) + 
+ two := newListedValNIH(2) + headA.Push(unsafe.Pointer(two)) + + three := newListedValNIH(3) + headA.Push(unsafe.Pointer(three)) + + headA.Remove(unsafe.Pointer(one)) + headA.Remove(unsafe.Pointer(two)) + headA.Remove(unsafe.Pointer(three)) + + p := headA.Pop() + v := (*listedValNIH)(p) + if v != nil { + t.Fatalf("pop got %+v want nil", v) + } +} diff --git a/src/runtime/list_test.go b/src/runtime/list_test.go new file mode 100644 index 00000000000..5839bf63c31 --- /dev/null +++ b/src/runtime/list_test.go @@ -0,0 +1,215 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "runtime" + "testing" + "unsafe" +) + +type listedVal struct { + val int + + aNode runtime.ListNode + bNode runtime.ListNode +} + +func newListedVal(v int) *listedVal { + return &listedVal{ + val: v, + } +} + +func TestListPush(t *testing.T) { + var headA runtime.ListHead + headA.Init(unsafe.Offsetof(listedVal{}.aNode)) + + one := newListedVal(1) + headA.Push(unsafe.Pointer(one)) + + two := newListedVal(2) + headA.Push(unsafe.Pointer(two)) + + three := newListedVal(3) + headA.Push(unsafe.Pointer(three)) + + p := headA.Pop() + v := (*listedVal)(p) + if v == nil { + t.Fatalf("pop got nil want 3") + } + if v.val != 3 { + t.Errorf("pop got %d want 3", v.val) + } + + p = headA.Pop() + v = (*listedVal)(p) + if v == nil { + t.Fatalf("pop got nil want 2") + } + if v.val != 2 { + t.Errorf("pop got %d want 2", v.val) + } + + p = headA.Pop() + v = (*listedVal)(p) + if v == nil { + t.Fatalf("pop got nil want 1") + } + if v.val != 1 { + t.Errorf("pop got %d want 1", v.val) + } + + p = headA.Pop() + v = (*listedVal)(p) + if v != nil { + t.Fatalf("pop got %+v want nil", v) + } +} + +func wantVal(t *testing.T, v *listedVal, i int) { + t.Helper() + if v == nil { + t.Fatalf("listedVal got nil want %d", i) + } + if v.val != i { + t.Errorf("pop got %d want %d", v.val, i) + } +} + +func TestListRemoveHead(t *testing.T) { + var headA runtime.ListHead + headA.Init(unsafe.Offsetof(listedVal{}.aNode)) + + one := newListedVal(1) + headA.Push(unsafe.Pointer(one)) + + two := newListedVal(2) + headA.Push(unsafe.Pointer(two)) + + three := newListedVal(3) + headA.Push(unsafe.Pointer(three)) + + headA.Remove(unsafe.Pointer(three)) + + p := headA.Pop() + v := (*listedVal)(p) + wantVal(t, v, 2) + + p = headA.Pop() + v = (*listedVal)(p) + wantVal(t, v, 1) + + p = headA.Pop() + v = (*listedVal)(p) + if v != nil { + t.Fatalf("pop got %+v want nil", v) + } +} + +func TestListRemoveMiddle(t *testing.T) { + var headA runtime.ListHead + headA.Init(unsafe.Offsetof(listedVal{}.aNode)) + + one := newListedVal(1) + headA.Push(unsafe.Pointer(one)) + + two := newListedVal(2) + headA.Push(unsafe.Pointer(two)) + + three := newListedVal(3) + headA.Push(unsafe.Pointer(three)) + + headA.Remove(unsafe.Pointer(two)) + + p := headA.Pop() + v := (*listedVal)(p) + wantVal(t, v, 3) + + p = headA.Pop() + v = (*listedVal)(p) + wantVal(t, v, 1) + + p = headA.Pop() + v = (*listedVal)(p) + if v != nil { + t.Fatalf("pop got %+v want nil", v) + } +} + +func TestListRemoveTail(t *testing.T) { + var headA runtime.ListHead + headA.Init(unsafe.Offsetof(listedVal{}.aNode)) + + one := newListedVal(1) + headA.Push(unsafe.Pointer(one)) + + two := newListedVal(2) + headA.Push(unsafe.Pointer(two)) + + three := newListedVal(3) + headA.Push(unsafe.Pointer(three)) + + headA.Remove(unsafe.Pointer(one)) + + p := headA.Pop() + v := (*listedVal)(p) + 
wantVal(t, v, 3) + + p = headA.Pop() + v = (*listedVal)(p) + wantVal(t, v, 2) + + p = headA.Pop() + v = (*listedVal)(p) + if v != nil { + t.Fatalf("pop got %+v want nil", v) + } +} + +func TestListRemoveAll(t *testing.T) { + var headA runtime.ListHead + headA.Init(unsafe.Offsetof(listedVal{}.aNode)) + + one := newListedVal(1) + headA.Push(unsafe.Pointer(one)) + + two := newListedVal(2) + headA.Push(unsafe.Pointer(two)) + + three := newListedVal(3) + headA.Push(unsafe.Pointer(three)) + + headA.Remove(unsafe.Pointer(one)) + headA.Remove(unsafe.Pointer(two)) + headA.Remove(unsafe.Pointer(three)) + + p := headA.Pop() + v := (*listedVal)(p) + if v != nil { + t.Fatalf("pop got %+v want nil", v) + } +} + +func BenchmarkListPushPop(b *testing.B) { + var head runtime.ListHead + head.Init(unsafe.Offsetof(listedVal{}.aNode)) + + vals := make([]listedVal, 10000) + i := 0 + for b.Loop() { + if i == len(vals) { + for range len(vals) { + head.Pop() + } + i = 0 + } + + head.Push(unsafe.Pointer(&vals[i])) + + i++ + } +} diff --git a/src/runtime/lockrank.go b/src/runtime/lockrank.go index 9821e499989..9e676bf8e35 100644 --- a/src/runtime/lockrank.go +++ b/src/runtime/lockrank.go @@ -71,6 +71,7 @@ const ( // WB lockRankWbufSpans lockRankXRegAlloc + lockRankSpanSPMCs lockRankMheap lockRankMheapSpecial lockRankGlobalAlloc @@ -145,6 +146,7 @@ var lockNames = []string{ lockRankHchanLeaf: "hchanLeaf", lockRankWbufSpans: "wbufSpans", lockRankXRegAlloc: "xRegAlloc", + lockRankSpanSPMCs: "spanSPMCs", lockRankMheap: "mheap", lockRankMheapSpecial: "mheapSpecial", lockRankGlobalAlloc: "globalAlloc", @@ -231,9 +233,10 @@ var lockPartialOrder [][]lockRank = [][]lockRank{ lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf}, lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, lockRankXRegAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, 
lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, + lockRankSpanSPMCs: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans}, lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, - lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, 
lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankXRegAlloc, lockRankMheap, lockRankMheapSpecial}, + lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankXRegAlloc, lockRankSpanSPMCs, lockRankMheap, lockRankMheapSpecial}, lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace}, lockRankPanic: {}, diff --git a/src/runtime/malloc_generated.go b/src/runtime/malloc_generated.go index 600048c6755..2215dbaddb2 100644 --- 
a/src/runtime/malloc_generated.go +++ b/src/runtime/malloc_generated.go @@ -150,6 +150,10 @@ func mallocgcSmallScanNoHeaderSC1(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -304,6 +308,10 @@ func mallocgcSmallScanNoHeaderSC2(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -458,6 +466,10 @@ func mallocgcSmallScanNoHeaderSC3(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -612,6 +624,10 @@ func mallocgcSmallScanNoHeaderSC4(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -766,6 +782,10 @@ func mallocgcSmallScanNoHeaderSC5(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -920,6 +940,10 @@ func mallocgcSmallScanNoHeaderSC6(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -1074,6 +1098,10 @@ func mallocgcSmallScanNoHeaderSC7(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -1228,6 +1256,10 @@ func mallocgcSmallScanNoHeaderSC8(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -1382,6 +1414,10 @@ func mallocgcSmallScanNoHeaderSC9(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -1536,6 +1572,10 @@ func mallocgcSmallScanNoHeaderSC10(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -1690,6 +1730,10 @@ func mallocgcSmallScanNoHeaderSC11(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -1844,6 +1888,10 @@ func mallocgcSmallScanNoHeaderSC12(size 
uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -1998,6 +2046,10 @@ func mallocgcSmallScanNoHeaderSC13(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -2152,6 +2204,10 @@ func mallocgcSmallScanNoHeaderSC14(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -2306,6 +2362,10 @@ func mallocgcSmallScanNoHeaderSC15(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -2460,6 +2520,10 @@ func mallocgcSmallScanNoHeaderSC16(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -2614,6 +2678,10 @@ func mallocgcSmallScanNoHeaderSC17(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -2768,6 +2836,10 @@ func mallocgcSmallScanNoHeaderSC18(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -2922,6 +2994,10 @@ func mallocgcSmallScanNoHeaderSC19(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -3076,6 +3152,10 @@ func mallocgcSmallScanNoHeaderSC20(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -3230,6 +3310,10 @@ func mallocgcSmallScanNoHeaderSC21(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -3384,6 +3468,10 @@ func mallocgcSmallScanNoHeaderSC22(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -3538,6 +3626,10 @@ func mallocgcSmallScanNoHeaderSC23(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if 
gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -3692,6 +3784,10 @@ func mallocgcSmallScanNoHeaderSC24(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -3846,6 +3942,10 @@ func mallocgcSmallScanNoHeaderSC25(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4000,6 +4100,10 @@ func mallocgcSmallScanNoHeaderSC26(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4064,6 +4168,10 @@ func mallocTiny1(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4142,6 +4250,10 @@ func mallocTiny1(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4206,6 +4318,10 @@ func mallocTiny2(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4284,6 +4400,10 @@ func mallocTiny2(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4348,6 +4468,10 @@ func mallocTiny3(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4426,6 +4550,10 @@ func mallocTiny3(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4490,6 +4618,10 @@ func mallocTiny4(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4568,6 +4700,10 @@ func mallocTiny4(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; 
assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4632,6 +4768,10 @@ func mallocTiny5(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4710,6 +4850,10 @@ func mallocTiny5(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4774,6 +4918,10 @@ func mallocTiny6(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4852,6 +5000,10 @@ func mallocTiny6(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4916,6 +5068,10 @@ func mallocTiny7(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4994,6 +5150,10 @@ func mallocTiny7(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5058,6 +5218,10 @@ func mallocTiny8(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5136,6 +5300,10 @@ func mallocTiny8(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5200,6 +5368,10 @@ func mallocTiny9(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5278,6 +5450,10 @@ func mallocTiny9(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5342,6 +5518,10 @@ func mallocTiny10(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - 
size) @@ -5420,6 +5600,10 @@ func mallocTiny10(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5484,6 +5668,10 @@ func mallocTiny11(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5562,6 +5750,10 @@ func mallocTiny11(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5626,6 +5818,10 @@ func mallocTiny12(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5704,6 +5900,10 @@ func mallocTiny12(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5768,6 +5968,10 @@ func mallocTiny13(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5846,6 +6050,10 @@ func mallocTiny13(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5910,6 +6118,10 @@ func mallocTiny14(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5988,6 +6200,10 @@ func mallocTiny14(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6052,6 +6268,10 @@ func mallocTiny15(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6130,6 +6350,10 @@ func mallocTiny15(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6223,6 +6447,10 @@ func 
mallocgcSmallNoScanSC2(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6316,6 +6544,10 @@ func mallocgcSmallNoScanSC3(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6409,6 +6641,10 @@ func mallocgcSmallNoScanSC4(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6502,6 +6738,10 @@ func mallocgcSmallNoScanSC5(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6595,6 +6835,10 @@ func mallocgcSmallNoScanSC6(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6688,6 +6932,10 @@ func mallocgcSmallNoScanSC7(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6781,6 +7029,10 @@ func mallocgcSmallNoScanSC8(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6874,6 +7126,10 @@ func mallocgcSmallNoScanSC9(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6967,6 +7223,10 @@ func mallocgcSmallNoScanSC10(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7060,6 +7320,10 @@ func mallocgcSmallNoScanSC11(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7153,6 +7417,10 @@ func mallocgcSmallNoScanSC12(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7246,6 +7514,10 @@ func mallocgcSmallNoScanSC13(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + 
valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7339,6 +7611,10 @@ func mallocgcSmallNoScanSC14(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7432,6 +7708,10 @@ func mallocgcSmallNoScanSC15(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7525,6 +7805,10 @@ func mallocgcSmallNoScanSC16(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7618,6 +7902,10 @@ func mallocgcSmallNoScanSC17(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7711,6 +7999,10 @@ func mallocgcSmallNoScanSC18(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7804,6 +8096,10 @@ func mallocgcSmallNoScanSC19(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7897,6 +8193,10 @@ func mallocgcSmallNoScanSC20(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7990,6 +8290,10 @@ func mallocgcSmallNoScanSC21(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -8083,6 +8387,10 @@ func mallocgcSmallNoScanSC22(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -8176,6 +8484,10 @@ func mallocgcSmallNoScanSC23(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -8269,6 +8581,10 @@ func mallocgcSmallNoScanSC24(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { 
assistG.gcAssistBytes -= int64(elemsize - size) @@ -8362,6 +8678,10 @@ func mallocgcSmallNoScanSC25(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -8455,6 +8775,10 @@ func mallocgcSmallNoScanSC26(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) diff --git a/src/runtime/malloc_stubs.go b/src/runtime/malloc_stubs.go index 7fd14441893..224746f3d41 100644 --- a/src/runtime/malloc_stubs.go +++ b/src/runtime/malloc_stubs.go @@ -50,6 +50,8 @@ func mallocPanic(size uintptr, typ *_type, needzero bool) unsafe.Pointer { panic("not defined for sizeclass") } +// WARNING: mallocStub does not do any work for sanitizers so callers need +// to steer out of this codepath early if sanitizers are enabled. func mallocStub(size uintptr, typ *_type, needzero bool) unsafe.Pointer { if doubleCheckMalloc { if gcphase == _GCmarktermination { @@ -77,6 +79,13 @@ func mallocStub(size uintptr, typ *_type, needzero bool) unsafe.Pointer { // Actually do the allocation. x, elemsize := inlinedMalloc(size, typ, needzero) + // Notify valgrind, if enabled. + // To allow the compiler to not know about valgrind, we do valgrind instrumentation + // unlike the other sanitizers. + if valgrindenabled { + valgrindMalloc(x, size) + } + // Adjust our GC assist debt to account for internal fragmentation. if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { diff --git a/src/runtime/map_benchmark_test.go b/src/runtime/map_benchmark_test.go index a26b35b44dc..9e93b219f17 100644 --- a/src/runtime/map_benchmark_test.go +++ b/src/runtime/map_benchmark_test.go @@ -493,7 +493,6 @@ func BenchmarkMapInterfacePtr(b *testing.B) { m := map[any]bool{} for i := 0; i < 100; i++ { - i := i m[&i] = true } diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go index a1d04d2f8a2..cade81031d0 100644 --- a/src/runtime/mcache.go +++ b/src/runtime/mcache.go @@ -50,7 +50,7 @@ type mcache struct { // flushGen indicates the sweepgen during which this mcache // was last flushed. If flushGen != mheap_.sweepgen, the spans - // in this mcache are stale and need to the flushed so they + // in this mcache are stale and need to be flushed so they // can be swept. This is done in acquirep. flushGen atomic.Uint32 } diff --git a/src/runtime/mem.go b/src/runtime/mem.go index cd06ea323d8..f373173eb36 100644 --- a/src/runtime/mem.go +++ b/src/runtime/mem.go @@ -19,7 +19,7 @@ import "unsafe" // fault, may give back unexpected zeroes, etc.). // 4) Ready - may be accessed safely. // -// This set of states is more than is strictly necessary to support all the +// This set of states is more than strictly necessary to support all the // currently supported platforms. One could get by with just None, Reserved, and // Ready. However, the Prepared state gives us flexibility for performance // purposes. For example, on POSIX-y operating systems, Reserved is usually a @@ -70,6 +70,12 @@ func sysUnused(v unsafe.Pointer, n uintptr) { sysUnusedOS(v, n) } +// needZeroAfterSysUnused reports whether memory returned by sysUnused must be +// zeroed for use. 
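// A sketch of an allocator-side caller (hypothetical; the real call sites are
// not part of this hunk). The Linux hook below reports true unless
// debug.madvdontneed is set, because MADV_FREE may leave the old contents in
// place until the kernel actually reclaims the pages:
//
//	if needZeroAfterSysUnused() {
//		memclrNoHeapPointers(v, n) // stale contents possible; clear first
//	}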
+func needZeroAfterSysUnused() bool { + return needZeroAfterSysUnusedOS() +} + // sysUsed transitions a memory region from Prepared to Ready. It notifies the // operating system that the memory region is needed and ensures that the region // may be safely accessed. This is typically a no-op on systems that don't have diff --git a/src/runtime/mem_aix.go b/src/runtime/mem_aix.go index c5e4710dacf..1203af57972 100644 --- a/src/runtime/mem_aix.go +++ b/src/runtime/mem_aix.go @@ -79,3 +79,7 @@ func sysMapOS(v unsafe.Pointer, n uintptr, _ string) { throw("runtime: cannot map pages in arena address space") } } + +func needZeroAfterSysUnusedOS() bool { + return true +} diff --git a/src/runtime/mem_bsd.go b/src/runtime/mem_bsd.go index 0c05b44c08f..70375615da3 100644 --- a/src/runtime/mem_bsd.go +++ b/src/runtime/mem_bsd.go @@ -85,3 +85,7 @@ func sysMapOS(v unsafe.Pointer, n uintptr, _ string) { throw("runtime: cannot map pages in arena address space") } } + +func needZeroAfterSysUnusedOS() bool { + return true +} diff --git a/src/runtime/mem_darwin.go b/src/runtime/mem_darwin.go index 9d4de516228..100512f5cdf 100644 --- a/src/runtime/mem_darwin.go +++ b/src/runtime/mem_darwin.go @@ -74,3 +74,7 @@ func sysMapOS(v unsafe.Pointer, n uintptr, _ string) { throw("runtime: cannot map pages in arena address space") } } + +func needZeroAfterSysUnusedOS() bool { + return true +} diff --git a/src/runtime/mem_linux.go b/src/runtime/mem_linux.go index 24e006debca..ce255376114 100644 --- a/src/runtime/mem_linux.go +++ b/src/runtime/mem_linux.go @@ -188,3 +188,7 @@ func sysMapOS(v unsafe.Pointer, n uintptr, vmaName string) { sysNoHugePageOS(v, n) } } + +func needZeroAfterSysUnusedOS() bool { + return debug.madvdontneed == 0 +} diff --git a/src/runtime/mem_sbrk.go b/src/runtime/mem_sbrk.go index 05f0fdb5d74..9e752df2c33 100644 --- a/src/runtime/mem_sbrk.go +++ b/src/runtime/mem_sbrk.go @@ -48,6 +48,16 @@ type memHdrPtr uintptr func (p memHdrPtr) ptr() *memHdr { return (*memHdr)(unsafe.Pointer(p)) } func (p *memHdrPtr) set(x *memHdr) { *p = memHdrPtr(unsafe.Pointer(x)) } +// memAlloc allocates n bytes from the brk reservation, or if it's full, +// the system. +// +// memlock must be held. +// +// memAlloc must be called on the system stack, otherwise a stack growth +// could cause us to call back into it. Since memlock is held, that could +// lead to a self-deadlock. +// +//go:systemstack func memAlloc(n uintptr) unsafe.Pointer { if p := memAllocNoGrow(n); p != nil { return p @@ -55,6 +65,15 @@ func memAlloc(n uintptr) unsafe.Pointer { return sbrk(n) } +// memAllocNoGrow attempts to allocate n bytes from the existing brk. +// +// memlock must be held. +// +// memAllocNoGrow must be called on the system stack, otherwise a stack growth +// could cause us to call back into it. Since memlock is held, that could +// lead to a self-deadlock. +// +//go:systemstack func memAllocNoGrow(n uintptr) unsafe.Pointer { n = memRound(n) var prevp *memHdr @@ -78,6 +97,15 @@ func memAllocNoGrow(n uintptr) unsafe.Pointer { return nil } +// memFree makes [ap, ap+n) available for reallocation by memAlloc. +// +// memlock must be held. +// +// memFree must be called on the system stack, otherwise a stack growth +// could cause us to call back into it. Since memlock is held, that could +// lead to a self-deadlock.
+// +//go:systemstack +func memFree(ap unsafe.Pointer, n uintptr) { n = memRound(n) memclrNoHeapPointers(ap, n) @@ -122,6 +150,15 @@ func memFree(ap unsafe.Pointer, n uintptr) { } } +// memCheck checks invariants around free list management. +// +// memlock must be held. +// +// memCheck must be called on the system stack, otherwise a stack growth +// could cause us to call back into it. Since memlock is held, that could +// lead to a self-deadlock. +// +//go:systemstack func memCheck() { if !memDebug { return } @@ -158,26 +195,31 @@ func initBloc() { } func sysAllocOS(n uintptr, _ string) unsafe.Pointer { - lock(&memlock) - p := memAlloc(n) - memCheck() - unlock(&memlock) - return p + var p uintptr + systemstack(func() { + lock(&memlock) + p = uintptr(memAlloc(n)) + memCheck() + unlock(&memlock) + }) + return unsafe.Pointer(p) } func sysFreeOS(v unsafe.Pointer, n uintptr) { - lock(&memlock) - if uintptr(v)+n == bloc { - // Address range being freed is at the end of memory, - // so record a new lower value for end of memory. - // Can't actually shrink address space because segment is shared. - memclrNoHeapPointers(v, n) - bloc -= n - } else { - memFree(v, n) - memCheck() - } - unlock(&memlock) + systemstack(func() { + lock(&memlock) + if uintptr(v)+n == bloc { + // Address range being freed is at the end of memory, + // so record a new lower value for end of memory. + // Can't actually shrink address space because segment is shared. + memclrNoHeapPointers(v, n) + bloc -= n + } else { + memFree(v, n) + memCheck() + } + unlock(&memlock) + }) } func sysUnusedOS(v unsafe.Pointer, n uintptr) { @@ -202,49 +244,59 @@ func sysFaultOS(v unsafe.Pointer, n uintptr) { } func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer { - lock(&memlock) - var p unsafe.Pointer - if uintptr(v) == bloc { - // Address hint is the current end of memory, - // so try to extend the address space. - p = sbrk(n) - } - if p == nil && v == nil { - p = memAlloc(n) - memCheck() - } - unlock(&memlock) - return p + var p uintptr + systemstack(func() { + lock(&memlock) + if uintptr(v) == bloc { + // Address hint is the current end of memory, + // so try to extend the address space. + p = uintptr(sbrk(n)) + } + if p == 0 && v == nil { + p = uintptr(memAlloc(n)) + memCheck() + } + unlock(&memlock) + }) + return unsafe.Pointer(p) } func sysReserveAlignedSbrk(size, align uintptr) (unsafe.Pointer, uintptr) { - lock(&memlock) - if p := memAllocNoGrow(size + align); p != nil { - // We can satisfy the reservation from the free list. - // Trim off the unaligned parts. - pAligned := alignUp(uintptr(p), align) - if startLen := pAligned - uintptr(p); startLen > 0 { - memFree(p, startLen) + var p uintptr + systemstack(func() { + lock(&memlock) + if base := memAllocNoGrow(size + align); base != nil { + // We can satisfy the reservation from the free list. + // Trim off the unaligned parts. + start := alignUp(uintptr(base), align) + if startLen := start - uintptr(base); startLen > 0 { + memFree(base, startLen) + } + end := start + size + if endLen := (uintptr(base) + size + align) - end; endLen > 0 { + memFree(unsafe.Pointer(end), endLen) + } + memCheck() + unlock(&memlock) + p = start + return } - end := pAligned + size - if endLen := (uintptr(p) + size + align) - end; endLen > 0 { - memFree(unsafe.Pointer(end), endLen) - } - memCheck() - unlock(&memlock) - return unsafe.Pointer(pAligned), size - } - // Round up bloc to align, then allocate size.
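sysReserveAlignedSbrk's free-list path above over-allocates size+align bytes and gives the unaligned head and tail back to the free list. The trimming arithmetic, reduced to a standalone program (alignUp mirrors the runtime helper of the same name):

package main

import "fmt"

// alignUp rounds p up to a multiple of align, which must be a power of two.
func alignUp(p, align uintptr) uintptr { return (p + align - 1) &^ (align - 1) }

func main() {
	base := uintptr(0x1003)
	const size, align = uintptr(0x4000), uintptr(0x1000)
	start := alignUp(base, align)
	end := start + size
	// The head [base, start) and tail [end, base+size+align) go back on
	// the free list; only the aligned middle [start, end) is kept.
	fmt.Printf("head: %#x..%#x\n", base, start)
	fmt.Printf("keep: %#x..%#x\n", start, end)
	fmt.Printf("tail: %#x..%#x\n", end, base+size+align)
}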
- p := alignUp(bloc, align) - r := sbrk(p + size - bloc) - if r == nil { - p, size = 0, 0 - } else if l := p - uintptr(r); l > 0 { - // Free the area we skipped over for alignment. - memFree(r, l) - memCheck() - } - unlock(&memlock) + // Round up bloc to align, then allocate size. + p = alignUp(bloc, align) + r := sbrk(p + size - bloc) + if r == nil { + p, size = 0, 0 + } else if l := p - uintptr(r); l > 0 { + // Free the area we skipped over for alignment. + memFree(r, l) + memCheck() + } + unlock(&memlock) + }) return unsafe.Pointer(p), size } + +func needZeroAfterSysUnusedOS() bool { + return true +} diff --git a/src/runtime/mem_windows.go b/src/runtime/mem_windows.go index 3db6fc2ba40..afc2dee19ff 100644 --- a/src/runtime/mem_windows.go +++ b/src/runtime/mem_windows.go @@ -132,3 +132,7 @@ func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer { func sysMapOS(v unsafe.Pointer, n uintptr, _ string) { } + +func needZeroAfterSysUnusedOS() bool { + return true +} diff --git a/src/runtime/memclr_arm64.s b/src/runtime/memclr_arm64.s index 3e49f7fcf6a..3f8acdaeff6 100644 --- a/src/runtime/memclr_arm64.s +++ b/src/runtime/memclr_arm64.s @@ -176,7 +176,7 @@ aligned: PCALIGN $16 loop_zva: - WORD $0xd50b7420 // DC ZVA, R0 + DC ZVA, R0 ADD R5, R0, R0 SUBS R5, R1, R1 BHS loop_zva diff --git a/src/runtime/memmove_test.go b/src/runtime/memmove_test.go index 22905504d45..6065a845539 100644 --- a/src/runtime/memmove_test.go +++ b/src/runtime/memmove_test.go @@ -221,8 +221,6 @@ func TestMemmoveAtomicity(t *testing.T) { for _, backward := range []bool{true, false} { for _, n := range []int{3, 4, 5, 6, 7, 8, 9, 10, 15, 25, 49} { - n := n - // test copying [N]*int. sz := uintptr(n * PtrSize) name := fmt.Sprint(sz) diff --git a/src/runtime/metrics.go b/src/runtime/metrics.go index 36efef39c03..2710bf68652 100644 --- a/src/runtime/metrics.go +++ b/src/runtime/metrics.go @@ -469,42 +469,42 @@ func initMetrics() { deps: makeStatDepSet(schedStatsDep), compute: func(in *statAggregate, out *metricValue) { out.kind = metricKindUint64 - out.scalar = uint64(in.schedStats.gTotal) + out.scalar = in.schedStats.gTotal }, }, "/sched/goroutines/not-in-go:goroutines": { deps: makeStatDepSet(schedStatsDep), compute: func(in *statAggregate, out *metricValue) { out.kind = metricKindUint64 - out.scalar = uint64(in.schedStats.gNonGo) + out.scalar = in.schedStats.gNonGo }, }, "/sched/goroutines/running:goroutines": { deps: makeStatDepSet(schedStatsDep), compute: func(in *statAggregate, out *metricValue) { out.kind = metricKindUint64 - out.scalar = uint64(in.schedStats.gRunning) + out.scalar = in.schedStats.gRunning }, }, "/sched/goroutines/runnable:goroutines": { deps: makeStatDepSet(schedStatsDep), compute: func(in *statAggregate, out *metricValue) { out.kind = metricKindUint64 - out.scalar = uint64(in.schedStats.gRunnable) + out.scalar = in.schedStats.gRunnable }, }, "/sched/goroutines/waiting:goroutines": { deps: makeStatDepSet(schedStatsDep), compute: func(in *statAggregate, out *metricValue) { out.kind = metricKindUint64 - out.scalar = uint64(in.schedStats.gWaiting) + out.scalar = in.schedStats.gWaiting }, }, "/sched/goroutines-created:goroutines": { deps: makeStatDepSet(schedStatsDep), compute: func(in *statAggregate, out *metricValue) { out.kind = metricKindUint64 - out.scalar = uint64(in.schedStats.gCreated) + out.scalar = in.schedStats.gCreated }, }, "/sched/latencies:seconds": { @@ -536,7 +536,7 @@ func initMetrics() { deps: makeStatDepSet(schedStatsDep), compute: func(in *statAggregate, out 
*metricValue) { out.kind = metricKindUint64 - out.scalar = uint64(in.schedStats.threads) + out.scalar = in.schedStats.threads }, }, "/sync/mutex/wait/total:seconds": { @@ -818,9 +818,12 @@ func (a *schedStatsAggregate) compute() { a.gCreated += p.goroutinesCreated switch p.status { case _Prunning: - a.gRunning++ - case _Psyscall: - a.gNonGo++ + if thread, ok := setBlockOnExitSyscall(p); ok { + thread.resume() + a.gNonGo++ + } else { + a.gRunning++ + } case _Pgcstop: // The world is stopping or stopped. // This is fine. The results will be @@ -847,7 +850,7 @@ func (a *schedStatsAggregate) compute() { // Global run queue. a.gRunnable += uint64(sched.runq.size) - // Account for Gs that are in _Gsyscall without a P in _Psyscall. + // Account for Gs that are in _Gsyscall without a P. nGsyscallNoP := sched.nGsyscallNoP.Load() // nGsyscallNoP can go negative during temporary races. diff --git a/src/runtime/metrics/doc.go b/src/runtime/metrics/doc.go index e40ce25ff9d..8f908f5b520 100644 --- a/src/runtime/metrics/doc.go +++ b/src/runtime/metrics/doc.go @@ -309,6 +309,11 @@ Below is the full list of supported metrics, ordered lexicographically. The number of non-default behaviors executed by the net/http package due to a non-default GODEBUG=http2server=... setting. + /godebug/non-default-behavior/httpcookiemaxnum:events + The number of non-default behaviors executed by the net/http + package due to a non-default GODEBUG=httpcookiemaxnum=... + setting. + /godebug/non-default-behavior/httplaxcontentlength:events The number of non-default behaviors executed by the net/http package due to a non-default GODEBUG=httplaxcontentlength=... @@ -394,6 +399,11 @@ Below is the full list of supported metrics, ordered lexicographically. The number of non-default behaviors executed by the runtime package due to a non-default GODEBUG=updatemaxprocs=... setting. + /godebug/non-default-behavior/urlstrictcolons:events + The number of non-default behaviors executed by the net/url + package due to a non-default GODEBUG=urlstrictcolons=... + setting. + /godebug/non-default-behavior/winreadlinkvolume:events The number of non-default behaviors executed by the os package due to a non-default GODEBUG=winreadlinkvolume=... setting. diff --git a/src/runtime/metrics_test.go b/src/runtime/metrics_test.go index b67424301b4..92cec75465c 100644 --- a/src/runtime/metrics_test.go +++ b/src/runtime/metrics_test.go @@ -471,7 +471,7 @@ func BenchmarkReadMetricsLatency(b *testing.B) { b.ReportMetric(float64(latencies[len(latencies)*99/100]), "p99-ns") } -var readMetricsSink [1024]interface{} +var readMetricsSink [1024]any func TestReadMetricsCumulative(t *testing.T) { // Set up the set of metrics marked cumulative. 
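For reference, the scheduler metrics touched above can be read with the public runtime/metrics API; on a toolchain that does not yet have them, the samples simply come back as KindBad:

package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	samples := []metrics.Sample{
		{Name: "/sched/goroutines/running:goroutines"},
		{Name: "/sched/goroutines/runnable:goroutines"},
		{Name: "/sched/goroutines/waiting:goroutines"},
		{Name: "/sched/goroutines/not-in-go:goroutines"},
		{Name: "/sched/goroutines-created:goroutines"},
	}
	metrics.Read(samples)
	for _, s := range samples {
		if s.Value.Kind() == metrics.KindBad {
			fmt.Println(s.Name, "unsupported on this toolchain")
			continue
		}
		fmt.Println(s.Name, "=", s.Value.Uint64())
	}
}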
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go index b13ec845fc4..43afbc330bb 100644 --- a/src/runtime/mgc.go +++ b/src/runtime/mgc.go @@ -195,10 +195,12 @@ func gcinit() { work.startSema = 1 work.markDoneSema = 1 + work.spanSPMCs.list.init(unsafe.Offsetof(spanSPMC{}.allnode)) lockInit(&work.sweepWaiters.lock, lockRankSweepWaiters) lockInit(&work.assistQueue.lock, lockRankAssistQueue) lockInit(&work.strongFromWeak.lock, lockRankStrongFromWeakQueue) lockInit(&work.wbufSpans.lock, lockRankWbufSpans) + lockInit(&work.spanSPMCs.lock, lockRankSpanSPMCs) lockInit(&gcCleanups.lock, lockRankCleanupQueue) } @@ -314,7 +316,7 @@ func pollFractionalWorkerExit() bool { return true } p := getg().m.p.ptr() - selfTime := p.gcFractionalMarkTime + (now - p.gcMarkWorkerStartTime) + selfTime := p.gcFractionalMarkTime.Load() + (now - p.gcMarkWorkerStartTime) // Add some slack to the utilization goal so that the // fractional worker isn't behind again the instant it exits. return float64(selfTime)/float64(delta) > 1.2*gcController.fractionalUtilizationGoal @@ -352,8 +354,8 @@ type workType struct { // // Only used if goexperiment.GreenTeaGC. spanSPMCs struct { - lock mutex // no lock rank because it's a leaf lock (see mklockrank.go). - all *spanSPMC + lock mutex + list listHeadManual // *spanSPMC } // Restore 64-bit alignment on 32-bit. @@ -1856,7 +1858,7 @@ func gcBgMarkWorker(ready chan struct{}) { pp.limiterEvent.stop(limiterEventIdleMarkWork, now) } if pp.gcMarkWorkerMode == gcMarkWorkerFractionalMode { - atomic.Xaddint64(&pp.gcFractionalMarkTime, duration) + pp.gcFractionalMarkTime.Add(duration) } // We'll releasem after this point and thus this P may run @@ -2046,8 +2048,7 @@ func gcSweep(mode gcMode) bool { prepareFreeWorkbufs() for freeSomeWbufs(false) { } - for freeSomeSpanSPMCs(false) { - } + freeDeadSpanSPMCs() // All "free" events for this mark/sweep cycle have // now happened, so we can make this profile cycle // available immediately. diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go index ba3824f00dc..dd76973c623 100644 --- a/src/runtime/mgcmark.go +++ b/src/runtime/mgcmark.go @@ -911,7 +911,7 @@ func scanstack(gp *g, gcw *gcWork) int64 { default: print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n") throw("mark - bad status") - case _Gdead: + case _Gdead, _Gdeadextra: return 0 case _Grunning: print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n") diff --git a/src/runtime/mgcmark_greenteagc.go b/src/runtime/mgcmark_greenteagc.go index 3975e1e76b5..3594b33cfd4 100644 --- a/src/runtime/mgcmark_greenteagc.go +++ b/src/runtime/mgcmark_greenteagc.go @@ -618,6 +618,37 @@ func (q *spanQueue) refill(r *spanSPMC) objptr { return q.tryGetFast() } +// destroy frees all chains in an empty spanQueue. +// +// Preconditions: +// - World is stopped. +// - GC is outside of the mark phase. +// - (Therefore) the queue is empty. +func (q *spanQueue) destroy() { + assertWorldStopped() + if gcphase != _GCoff { + throw("spanQueue.destroy during the mark phase") + } + if !q.empty() { + throw("spanQueue.destroy on non-empty queue") + } + + lock(&work.spanSPMCs.lock) + + // Remove, deinitialize, and free each ring. 
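The gcFractionalMarkTime changes above and in mgcpacer.go below are a mechanical migration from a plain int64 updated with atomic.Xaddint64 to an atomic.Int64 field. The same migration in user code:

package main

import (
	"fmt"
	"sync/atomic"
)

type perP struct {
	// before: fractionalMarkTime int64, updated via
	// atomic.AddInt64(&p.fractionalMarkTime, d)
	fractionalMarkTime atomic.Int64
}

func main() {
	var p perP
	p.fractionalMarkTime.Add(250)  // worker accumulates time
	p.fractionalMarkTime.Store(0)  // per-cycle reset, as in startCycle
	p.fractionalMarkTime.Add(42)
	fmt.Println(p.fractionalMarkTime.Load()) // 42
}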
+ for r := (*spanSPMC)(q.chain.tail.Load()); r != nil; r = (*spanSPMC)(r.prev.Load()) { + work.spanSPMCs.list.remove(unsafe.Pointer(r)) + r.deinit() + mheap_.spanSPMCAlloc.free(unsafe.Pointer(r)) + } + + q.chain.head = nil + q.chain.tail.Store(nil) + q.putsSinceDrain = 0 + + unlock(&work.spanSPMCs.lock) +} + // spanSPMC is a ring buffer of objptrs that represent spans. // Accessed without a lock. // @@ -646,10 +677,10 @@ func (q *spanQueue) refill(r *spanSPMC) objptr { type spanSPMC struct { _ sys.NotInHeap - // allnext is the link to the next spanSPMC on the work.spanSPMCs list. - // This is used to find and free dead spanSPMCs. Protected by + // allnode is the linked list node for work.spanSPMCs list. This is + // used to find and free dead spanSPMCs. Protected by // work.spanSPMCs.lock. - allnext *spanSPMC + allnode listNodeManual // dead indicates whether the spanSPMC is no longer in use. // Protected by the CAS to the prev field of the spanSPMC pointing @@ -677,8 +708,7 @@ type spanSPMC struct { func newSpanSPMC(cap uint32) *spanSPMC { lock(&work.spanSPMCs.lock) r := (*spanSPMC)(mheap_.spanSPMCAlloc.alloc()) - r.allnext = work.spanSPMCs.all - work.spanSPMCs.all = r + work.spanSPMCs.list.push(unsafe.Pointer(r)) unlock(&work.spanSPMCs.lock) // If cap < the capacity of a single physical page, round up. @@ -714,6 +744,7 @@ func (r *spanSPMC) deinit() { r.head.Store(0) r.tail.Store(0) r.cap = 0 + r.allnode = listNodeManual{} } // slot returns a pointer to slot i%r.cap. @@ -722,13 +753,8 @@ func (r *spanSPMC) slot(i uint32) *objptr { return (*objptr)(unsafe.Add(unsafe.Pointer(r.ring), idx*unsafe.Sizeof(objptr(0)))) } -// freeSomeSpanSPMCs frees some spanSPMCs back to the OS and returns -// true if it should be called again to free more. -func freeSomeSpanSPMCs(preemptible bool) bool { - // TODO(mknyszek): This is arbitrary, but some kind of limit is necessary - // to help bound delays to cooperatively preempt ourselves. - const batchSize = 64 - +// freeDeadSpanSPMCs frees dead spanSPMCs back to the OS. +func freeDeadSpanSPMCs() { // According to the SPMC memory management invariants, we can only free // spanSPMCs outside of the mark phase. We ensure we do this in two ways. // @@ -740,33 +766,29 @@ func freeSomeSpanSPMCs(preemptible bool) bool { // // This way, we ensure that we don't start freeing if we're in the wrong // phase, and the phase can't change on us while we're freeing. + // + // TODO(go.dev/issue/75771): Due to the grow semantics in + // spanQueue.drain, we expect a steady-state of around one spanSPMC per + // P, with some spikes higher when Ps have more than one. For high + // GOMAXPROCS, or if this list otherwise gets long, it would be nice to + // have a way to batch work that allows preemption during processing. lock(&work.spanSPMCs.lock) - if gcphase != _GCoff || work.spanSPMCs.all == nil { + if gcphase != _GCoff || work.spanSPMCs.list.empty() { unlock(&work.spanSPMCs.lock) - return false + return } - rp := &work.spanSPMCs.all - gp := getg() - more := true - for i := 0; i < batchSize && !(preemptible && gp.preempt); i++ { - r := *rp - if r == nil { - more = false - break - } + r := (*spanSPMC)(work.spanSPMCs.list.head()) + for r != nil { + next := (*spanSPMC)(unsafe.Pointer(r.allnode.next)) if r.dead.Load() { - // It's dead. Deinitialize and free it. - *rp = r.allnext + // It's dead. Remove, deinitialize and free it. + work.spanSPMCs.list.remove(unsafe.Pointer(r)) r.deinit() mheap_.spanSPMCAlloc.free(unsafe.Pointer(r)) - } else { - // Still alive, likely in some P's chain. 
- // Skip it. - rp = &r.allnext } + r = next } unlock(&work.spanSPMCs.lock) - return more } // tryStealSpan attempts to steal a span from another P's local queue. diff --git a/src/runtime/mgcmark_nogreenteagc.go b/src/runtime/mgcmark_nogreenteagc.go index 9838887f7be..a0470c6e322 100644 --- a/src/runtime/mgcmark_nogreenteagc.go +++ b/src/runtime/mgcmark_nogreenteagc.go @@ -63,12 +63,16 @@ func (q *spanQueue) empty() bool { return true } -type spanSPMC struct { - _ sys.NotInHeap +func (q *spanQueue) destroy() { } -func freeSomeSpanSPMCs(preemptible bool) bool { - return false +type spanSPMC struct { + _ sys.NotInHeap + allnode listNodeManual +} + +func freeDeadSpanSPMCs() { + return } type objptr uintptr diff --git a/src/runtime/mgcpacer.go b/src/runtime/mgcpacer.go index 17e2f405e48..32c1b941e53 100644 --- a/src/runtime/mgcpacer.go +++ b/src/runtime/mgcpacer.go @@ -9,7 +9,7 @@ import ( "internal/goexperiment" "internal/runtime/atomic" "internal/runtime/math" - "internal/runtime/strconv" + "internal/strconv" _ "unsafe" // for go:linkname ) @@ -427,7 +427,7 @@ func (c *gcControllerState) startCycle(markStartTime int64, procs int, trigger g // Clear per-P state for _, p := range allp { p.gcAssistTime = 0 - p.gcFractionalMarkTime = 0 + p.gcFractionalMarkTime.Store(0) } if trigger.kind == gcTriggerTime { @@ -830,7 +830,7 @@ func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64) { // // This should be kept in sync with pollFractionalWorkerExit. delta := now - c.markStartTime - if delta > 0 && float64(pp.gcFractionalMarkTime)/float64(delta) > c.fractionalUtilizationGoal { + if delta > 0 && float64(pp.gcFractionalMarkTime.Load())/float64(delta) > c.fractionalUtilizationGoal { // Nope. No need to run a fractional worker. gcBgMarkWorkerPool.push(&node.node) return nil, now @@ -1313,8 +1313,8 @@ func readGOGC() int32 { if p == "off" { return -1 } - if n, ok := strconv.Atoi32(p); ok { - return n + if n, err := strconv.ParseInt(p, 10, 32); err == nil { + return int32(n) } return 100 } diff --git a/src/runtime/mgcpacer_test.go b/src/runtime/mgcpacer_test.go index ef1483d629e..4b9cbf55890 100644 --- a/src/runtime/mgcpacer_test.go +++ b/src/runtime/mgcpacer_test.go @@ -603,7 +603,6 @@ func TestGcPacer(t *testing.T) { // However, it is still possible to trigger this case if something exceptional // happens between calls to revise; the framework just doesn't support this yet. 
} { - e := e t.Run(e.name, func(t *testing.T) { t.Parallel() diff --git a/src/runtime/mgcscavenge_test.go b/src/runtime/mgcscavenge_test.go index 7b86ae8ffca..4f9dbac4815 100644 --- a/src/runtime/mgcscavenge_test.go +++ b/src/runtime/mgcscavenge_test.go @@ -285,7 +285,6 @@ func TestPallocDataFindScavengeCandidate(t *testing.T) { } } for name, v := range tests { - v := v t.Run(name, func(t *testing.T) { b := makePallocData(v.alloc, v.scavenged) start, size := b.FindScavengeCandidate(PallocChunkPages-1, v.min, v.max) @@ -447,7 +446,6 @@ func TestPageAllocScavenge(t *testing.T) { } } for name, v := range tests { - v := v t.Run(name, func(t *testing.T) { b := NewPageAlloc(v.beforeAlloc, v.beforeScav) defer FreePageAlloc(b) @@ -811,7 +809,6 @@ func TestScavengeIndex(t *testing.T) { ) } for _, test := range tests { - test := test t.Run("Bg/"+test.name, func(t *testing.T) { mark, find, nextGen := setup(t, false) test.mark(mark) diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go index 364cdb58ccb..c3d6afb90a5 100644 --- a/src/runtime/mgcsweep.go +++ b/src/runtime/mgcsweep.go @@ -307,10 +307,7 @@ func bgsweep(c chan int) { // N.B. freeSomeWbufs is already batched internally. goschedIfBusy() } - for freeSomeSpanSPMCs(true) { - // N.B. freeSomeSpanSPMCs is already batched internally. - goschedIfBusy() - } + freeDeadSpanSPMCs() lock(&sweep.lock) if !isSweepDone() { // This can happen if a GC runs between diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go index f2dc3717b1b..711c7790eb7 100644 --- a/src/runtime/mheap.go +++ b/src/runtime/mheap.go @@ -1394,7 +1394,7 @@ HaveSpan: } // Initialize the span. - h.initSpan(s, typ, spanclass, base, npages) + h.initSpan(s, typ, spanclass, base, npages, scav) if valgrindenabled { valgrindMempoolMalloc(unsafe.Pointer(arenaBase(arenaIndex(base))), unsafe.Pointer(base), npages*pageSize) @@ -1440,11 +1440,17 @@ HaveSpan: // initSpan initializes a blank span s which will represent the range // [base, base+npages*pageSize). typ is the type of span being allocated. -func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base, npages uintptr) { +func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base, npages, scav uintptr) { // At this point, both s != nil and base != 0, and the heap // lock is no longer held. Initialize the span. s.init(base, npages) - if h.allocNeedsZero(base, npages) { + // Always call allocNeedsZero to update the arena's zeroedBase watermark + // and determine if the memory is considered dirty. + needZero := h.allocNeedsZero(base, npages) + // If these pages were scavenged (returned to the OS), the kernel guarantees + // they will be zero-filled on next use (fault-in), so we can treat them as + // already zeroed and skip explicit clearing. + if (needZeroAfterSysUnused() || scav != npages*pageSize) && needZero { s.needzero = 1 } nbytes := npages * pageSize diff --git a/src/runtime/mklockrank.go b/src/runtime/mklockrank.go index 9c503369a35..07b40396a51 100644 --- a/src/runtime/mklockrank.go +++ b/src/runtime/mklockrank.go @@ -196,6 +196,9 @@ defer, # xRegState allocator sched < xRegAlloc; +# spanSPMCs allocator and list +WB, sched < spanSPMCs; + # Span allocator stackLarge, stackpool, @@ -209,7 +212,7 @@ stackLarge, # Pinner bits might be freed by the span allocator. 
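The new needzero condition in initSpan above combines three inputs: whether allocNeedsZero reports the range dirty, whether the OS re-zeroes released pages (the new needZeroAfterSysUnused), and whether the whole range was scavenged. Distilled into a standalone predicate with illustrative names:

package main

import "fmt"

// spanNeedsZeroing mirrors the shape of
//   (needZeroAfterSysUnused() || scav != npages*pageSize) && needZero
// with the three inputs made explicit.
func spanNeedsZeroing(dirty, osZeroFillsOnReuse, fullyScavenged bool) bool {
	if !dirty {
		return false // never handed out since it was last known zero
	}
	return !osZeroFillsOnReuse || !fullyScavenged
}

func main() {
	// Fully scavenged pages on an OS that zero-fills on fault-in
	// (e.g. Linux with MADV_DONTNEED) can skip explicit clearing.
	fmt.Println(spanNeedsZeroing(true, true, true))  // false
	// With MADV_FREE the old contents may survive, so clear anyway.
	fmt.Println(spanNeedsZeroing(true, false, true)) // true
}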
mheap, mspanSpecial < mheapSpecial; # Fixallocs -mheap, mheapSpecial, xRegAlloc < globalAlloc; +mheap, mheapSpecial, xRegAlloc, spanSPMCs < globalAlloc; # Execution tracer events (with a P) hchan, diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go index 5cdf5139409..ca1c6e4a0e6 100644 --- a/src/runtime/mkpreempt.go +++ b/src/runtime/mkpreempt.go @@ -163,19 +163,21 @@ package runtime type xRegs struct { `) pos := 0 - for _, reg := range l.regs { - if reg.pos != pos { - log.Fatalf("padding not implemented") + for _, seq := range l.regs { + for _, r := range seq.regs { + if r.pos != pos && !seq.fixedOffset { + log.Fatalf("padding not implemented") + } + typ := fmt.Sprintf("[%d]byte", r.size) + switch { + case r.size == 4 && r.pos%4 == 0: + typ = "uint32" + case r.size == 8 && r.pos%8 == 0: + typ = "uint64" + } + fmt.Fprintf(g.w, "\t%s %s\n", r.name, typ) + pos += r.size } - typ := fmt.Sprintf("[%d]byte", reg.size) - switch { - case reg.size == 4 && reg.pos%4 == 0: - typ = "uint32" - case reg.size == 8 && reg.pos%8 == 0: - typ = "uint64" - } - fmt.Fprintf(g.w, "\t%s %s\n", reg.reg, typ) - pos += reg.size } fmt.Fprintf(g.w, "}\n") @@ -191,16 +193,61 @@ type xRegs struct { type layout struct { stack int - regs []regPos + regs []regSeq sp string // stack pointer register } -type regPos struct { - pos, size int +type regInfo struct { + size int // register size in bytes + name string // register name + // Some register names may require a specific suffix. + // In ARM64, a suffix called an "arrangement specifier" can be added to + // a register name. For example: + // + // V0.B16 + // + // In this case, "V0" is the register name, and ".B16" is the suffix. + suffix string + + pos int // position on stack +} + +// Some save/restore operations can involve multiple registers in a single +// instruction. For example, the LDP/STP instructions in ARM64: +// +// LDP 8(RSP), (R0, R1) +// STP (R0, R1), 8(RSP) +// +// In these cases, a pair of registers (R0, R1) is used as a single argument. +type regSeq struct { saveOp string restoreOp string - reg string + regs []regInfo + + // By default, all registers are saved on the stack, and the stack pointer offset + // is calculated based on the size of each register. For example (ARM64): + // + // STP (R0, R1), 8(RSP) + // STP (R2, R3), 24(RSP) + // + // However, automatic offset calculation may not always be desirable. + // In some cases, the offset must remain fixed: + // + // VST1.P [V0.B16, V1.B16, V2.B16, V3.B16], 64(R0) + // VST1.P [V4.B16, V5.B16, V6.B16, V7.B16], 64(R0) + // + // In this example, R0 is post-incremented after each instruction, + // so the offset should not be recalculated. For such cases, + // `fixedOffset` is set to true. + fixedOffset bool + + // After conversion to a string, register names are separated by commas + // and may be wrapped in a custom pair of brackets. 
For example (ARM64): + // + // (R0, R1) // wrapped in parentheses + // [V0.B16, V1.B16, V2.B16, V3.B16] // wrapped in square brackets + brackets [2]string // If this register requires special save and restore, these // give those operations with a %d placeholder for the stack @@ -208,40 +255,95 @@ type regPos struct { save, restore string } -func (l *layout) add(op, reg string, size int) { - l.regs = append(l.regs, regPos{saveOp: op, restoreOp: op, reg: reg, pos: l.stack, size: size}) +func (l *layout) add(op, regname string, size int) { + l.regs = append(l.regs, regSeq{saveOp: op, restoreOp: op, regs: []regInfo{{size, regname, "", l.stack}}}) l.stack += size } -func (l *layout) add2(sop, rop, reg string, size int) { - l.regs = append(l.regs, regPos{saveOp: sop, restoreOp: rop, reg: reg, pos: l.stack, size: size}) - l.stack += size +func (l *layout) add2(sop, rop string, regs []regInfo, brackets [2]string, fixedOffset bool) { + l.regs = append(l.regs, regSeq{saveOp: sop, restoreOp: rop, regs: regs, brackets: brackets, fixedOffset: fixedOffset}) + if !fixedOffset { + for i := range regs { + regs[i].pos = l.stack + l.stack += regs[i].size + } + } } func (l *layout) addSpecial(save, restore string, size int) { - l.regs = append(l.regs, regPos{save: save, restore: restore, pos: l.stack, size: size}) + l.regs = append(l.regs, regSeq{save: save, restore: restore, regs: []regInfo{{size, "", "", l.stack}}}) l.stack += size } +func (rs *regSeq) String() string { + switch len(rs.regs) { + case 0: + log.Fatal("Register sequence must not be empty!") + case 1: + return rs.regs[0].name + default: + names := make([]string, 0) + for _, r := range rs.regs { + name := r.name + r.suffix + names = append(names, name) + } + return rs.brackets[0] + strings.Join(names, ", ") + rs.brackets[1] + } + return "" +} + func (l *layout) save(g *gen) { - for _, reg := range l.regs { - if reg.save != "" { - g.p(reg.save, reg.pos) + for _, seq := range l.regs { + if len(seq.regs) < 1 { + log.Fatal("Register sequence must not be empty!") + } + // When dealing with a sequence of registers, we assume that only the position + // of the first register is relevant. For example: + // + // STP (R0, R1), 8(RSP) + // STP (R2, R3), 24(RSP) + // + // Here, R0.pos is 8. While we can infer that R1.pos is 16, it doesn't need to + // be explicitly specified, as the STP instruction calculates it automatically. 
+ pos := seq.regs[0].pos + if seq.save != "" { + g.p(seq.save, pos) } else { - g.p("%s %s, %d(%s)", reg.saveOp, reg.reg, reg.pos, l.sp) + name := seq.String() + g.p("%s %s, %d(%s)", seq.saveOp, name, pos, l.sp) + } + } +} + +func (l *layout) restoreInOrder(g *gen, reverse bool) { + var seq []regSeq + if reverse { + seq = make([]regSeq, 0) + for i := len(l.regs) - 1; i >= 0; i-- { + seq = append(seq, l.regs[i]) + } + } else { + seq = l.regs + } + for _, reg := range seq { + if len(reg.regs) < 1 { + log.Fatal("Register sequence must not be empty!") + } + pos := reg.regs[0].pos + if reg.restore != "" { + g.p(reg.restore, pos) + } else { + g.p("%s %d(%s), %s", reg.restoreOp, pos, l.sp, reg.String()) } } } func (l *layout) restore(g *gen) { - for i := len(l.regs) - 1; i >= 0; i-- { - reg := l.regs[i] - if reg.restore != "" { - g.p(reg.restore, reg.pos) - } else { - g.p("%s %d(%s), %s", reg.restoreOp, reg.pos, l.sp, reg.reg) - } - } + l.restoreInOrder(g, true) +} + +func (l *layout) restoreDirect(g *gen) { + l.restoreInOrder(g, false) } func gen386(g *gen) { @@ -320,8 +422,11 @@ func genAMD64(g *gen) { // We don't have to do this, but it results in a nice Go type. If we split // this into multiple types, we probably should stop doing this. for i := range lXRegs.regs { - lXRegs.regs[i].pos = lZRegs.regs[i].pos - lYRegs.regs[i].pos = lZRegs.regs[i].pos + for j := range lXRegs.regs[i].regs { + lXRegs.regs[i].regs[j].pos = lZRegs.regs[i].regs[j].pos + lYRegs.regs[i].regs[j].pos = lZRegs.regs[i].regs[j].pos + } + } writeXRegs(g.goarch, &lZRegs) @@ -456,6 +561,7 @@ func genARM(g *gen) { } func genARM64(g *gen) { + const vReg = "R0" // *xRegState p := g.p // Add integer registers R0-R26 // R27 (REGTMP), R28 (g), R29 (FP), R30 (LR), R31 (SP) are special @@ -466,8 +572,11 @@ func genARM64(g *gen) { i-- continue // R18 is not used, skip } - reg := fmt.Sprintf("(R%d, R%d)", i, i+1) - l.add2("STP", "LDP", reg, 16) + regs := []regInfo{ + {name: fmt.Sprintf("R%d", i), size: 8}, + {name: fmt.Sprintf("R%d", i+1), size: 8}, + } + l.add2("STP", "LDP", regs, [2]string{"(", ")"}, false) } // Add flag registers. l.addSpecial( @@ -480,10 +589,17 @@ func genARM64(g *gen) { 8) // TODO: FPCR? I don't think we'll change it, so no need to save. // Add floating point registers F0-F31. 
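regSeq.String above is what renders a register group as a single operand. A standalone reduction of the formatting, showing both bracket styles the ARM64 generator uses:

package main

import (
	"fmt"
	"strings"
)

// formatSeq joins register names, each with an optional arrangement
// suffix, inside a custom bracket pair, like regSeq.String.
func formatSeq(names []string, suffix string, brackets [2]string) string {
	withSuffix := make([]string, len(names))
	for i, n := range names {
		withSuffix[i] = n + suffix
	}
	return brackets[0] + strings.Join(withSuffix, ", ") + brackets[1]
}

func main() {
	gp := formatSeq([]string{"R0", "R1"}, "", [2]string{"(", ")"})
	vec := formatSeq([]string{"V0", "V1", "V2", "V3"}, ".B16", [2]string{"[", "]"})
	fmt.Printf("STP %s, 8(RSP)\n", gp)     // paired GP save
	fmt.Printf("VST1.P %s, 64(R0)\n", vec) // grouped vector save, post-increment
}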
- for i := 0; i < 31; i += 2 { - reg := fmt.Sprintf("(F%d, F%d)", i, i+1) - l.add2("FSTPD", "FLDPD", reg, 16) + lVRegs := layout{sp: vReg} // Non-GP registers + for i := 0; i < 31; i += 4 { + regs := []regInfo{ + {name: fmt.Sprintf("V%d", i), suffix: ".B16", size: 16, pos: 64}, + {name: fmt.Sprintf("V%d", i+1), suffix: ".B16", size: 16, pos: 64}, + {name: fmt.Sprintf("V%d", i+2), suffix: ".B16", size: 16, pos: 64}, + {name: fmt.Sprintf("V%d", i+3), suffix: ".B16", size: 16, pos: 64}, + } + lVRegs.add2("VST1.P", "VLD1.P", regs, [2]string{"[", "]"}, true) } + writeXRegs(g.goarch, &lVRegs) if l.stack%16 != 0 { l.stack += 8 // SP needs 16-byte alignment } @@ -500,8 +616,20 @@ func genARM64(g *gen) { p("MOVD R30, (RSP)") p("#endif") + p("// Save GPs") l.save(g) + p("// Save extended register state to p.xRegs.scratch") + p("MOVD g_m(g), %s", vReg) + p("MOVD m_p(%s), %s", vReg, vReg) + p("ADD $(p_xRegs+xRegPerP_scratch), %s, %s", vReg, vReg) + lVRegs.save(g) p("CALL ·asyncPreempt2(SB)") + p("// Restore non-GPs from *p.xRegs.cache") + p("MOVD g_m(g), %s", vReg) + p("MOVD m_p(%s), %s", vReg, vReg) + p("MOVD (p_xRegs+xRegPerP_cache)(%s), %s", vReg, vReg) + lVRegs.restoreDirect(g) + p("// Restore GPs") l.restore(g) p("MOVD %d(RSP), R30", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it @@ -585,10 +713,11 @@ func genMIPS(g *gen, _64bit bool) { } func genLoong64(g *gen) { - p := g.p + const xReg = "R4" // *xRegState + + p, label := g.p, g.label mov := "MOVV" - movf := "MOVD" add := "ADDV" sub := "SUBV" regsize := 8 @@ -604,12 +733,6 @@ func genLoong64(g *gen) { l.add(mov, reg, regsize) } - // Add floating point registers F0-F31. - for i := 0; i <= 31; i++ { - reg := fmt.Sprintf("F%d", i) - l.add(movf, reg, regsize) - } - // Add condition flag register fcc0-fcc7 sv := "" rs := "" @@ -636,12 +759,80 @@ func genLoong64(g *gen) { mov+" %d(R3), R5\n"+rs, regsize) + // Create layouts for lasx, lsx and fp registers. 
+ lasxRegs := layout{sp: xReg} + lsxRegs := lasxRegs + fpRegs := lasxRegs + for i := 0; i <= 31; i++ { + lasxRegs.add("XVMOVQ", fmt.Sprintf("X%d", i), 256/8) + lsxRegs.add("VMOVQ", fmt.Sprintf("V%d", i), 128/8) + fpRegs.add("MOVD", fmt.Sprintf("F%d", i), 64/8) + } + + for i := range lsxRegs.regs { + for j := range lsxRegs.regs[i].regs { + lsxRegs.regs[i].regs[j].pos = lasxRegs.regs[i].regs[j].pos + fpRegs.regs[i].regs[j].pos = lasxRegs.regs[i].regs[j].pos + } + } + writeXRegs(g.goarch, &lasxRegs) + // allocate frame, save PC of interrupted instruction (in LR) p(mov+" R1, -%d(R3)", l.stack) p(sub+" $%d, R3", l.stack) + p("// Save GPs") l.save(g) + + p("// Save extended register state to p.xRegs.scratch") + p("MOVV g_m(g), %s", xReg) + p("MOVV m_p(%s), %s", xReg, xReg) + p("ADDV $(p_xRegs+xRegPerP_scratch), %s, %s", xReg, xReg) + + p("MOVBU internal∕cpu·Loong64+const_offsetLOONG64HasLASX(SB), R5") + p("BNE R5, saveLASX") + + p("MOVBU internal∕cpu·Loong64+const_offsetLOONG64HasLSX(SB), R5") + p("BNE R5, saveLSX") + + label("saveFP:") + fpRegs.save(g) + p("JMP preempt") + + label("saveLSX:") + lsxRegs.save(g) + p("JMP preempt") + + label("saveLASX:") + lasxRegs.save(g) + + label("preempt:") p("CALL ·asyncPreempt2(SB)") + + p("// Restore non-GPs from *p.xRegs.cache") + p("MOVV g_m(g), %s", xReg) + p("MOVV m_p(%s), %s", xReg, xReg) + p("MOVV (p_xRegs+xRegPerP_cache)(%s), %s", xReg, xReg) + + p("MOVBU internal∕cpu·Loong64+const_offsetLOONG64HasLASX(SB), R5") + p("BNE R5, restoreLASX") + + p("MOVBU internal∕cpu·Loong64+const_offsetLOONG64HasLSX(SB), R5") + p("BNE R5, restoreLSX") + + label("restoreFP:") + fpRegs.restore(g) + p("JMP restoreGPs") + + label("restoreLSX:") + lsxRegs.restore(g) + p("JMP restoreGPs") + + label("restoreLASX:") + lasxRegs.restore(g) + + p("// Restore GPs") + label("restoreGPs:") l.restore(g) p(mov+" %d(R3), R1", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it diff --git a/src/runtime/mpagealloc_32bit.go b/src/runtime/mpagealloc_32bit.go index 4e99be1c2ab..44c7beecbc3 100644 --- a/src/runtime/mpagealloc_32bit.go +++ b/src/runtime/mpagealloc_32bit.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build 386 || arm || mips || mipsle || wasm +//go:build 386 || arm || mips || mipsle || wasm || (gccgo && (ppc || s390)) // wasm is a treated as a 32-bit architecture for the purposes of the page // allocator, even though it has 64-bit pointers. This is because any wasm diff --git a/src/runtime/mpagealloc_64bit.go b/src/runtime/mpagealloc_64bit.go index eb425f07044..2e3643004bc 100644 --- a/src/runtime/mpagealloc_64bit.go +++ b/src/runtime/mpagealloc_64bit.go @@ -180,9 +180,6 @@ func (p *pageAlloc) sysGrow(base, limit uintptr) { sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size()) p.summaryMappedReady += need.size() } - - // Update the scavenge index. - p.summaryMappedReady += p.scav.index.sysGrow(base, limit, p.sysStat) } // sysGrow increases the index's backing store in response to a heap growth. diff --git a/src/runtime/mpagealloc_test.go b/src/runtime/mpagealloc_test.go index ded7a799223..45badcb2606 100644 --- a/src/runtime/mpagealloc_test.go +++ b/src/runtime/mpagealloc_test.go @@ -181,7 +181,6 @@ func TestPageAllocGrow(t *testing.T) { } } for name, v := range tests { - v := v t.Run(name, func(t *testing.T) { // By creating a new pageAlloc, we will // grow it for each chunk defined in x. 
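The generated Loong64 code now dispatches on CPU capability flags to pick the widest available save path: LASX, then LSX, then plain FP. The control flow, reduced to plain Go with hypothetical booleans standing in for internal/cpu.Loong64.HasLASX and HasLSX:

package main

import "fmt"

// hasLASX and hasLSX stand in for the internal/cpu flags that the
// generated assembly tests via the const_offsetLOONG64Has* offsets.
var hasLASX, hasLSX = false, true

func saveVectorState() {
	switch {
	case hasLASX:
		fmt.Println("XVMOVQ: save 32 x 256-bit X registers")
	case hasLSX:
		fmt.Println("VMOVQ:  save 32 x 128-bit V registers")
	default:
		fmt.Println("MOVD:   save 32 x 64-bit F registers")
	}
}

func main() { saveVectorState() }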
@@ -678,7 +677,6 @@ func TestPageAllocAlloc(t *testing.T) { } } for name, v := range tests { - v := v t.Run(name, func(t *testing.T) { b := NewPageAlloc(v.before, v.scav) defer FreePageAlloc(b) @@ -705,7 +703,6 @@ func TestPageAllocExhaust(t *testing.T) { t.Skip("skipping because virtual memory is limited; see #36210") } for _, npages := range []uintptr{1, 2, 3, 4, 5, 8, 16, 64, 1024, 1025, 2048, 2049} { - npages := npages t.Run(fmt.Sprintf("%d", npages), func(t *testing.T) { // Construct b. bDesc := make(map[ChunkIdx][]BitRange) @@ -973,7 +970,6 @@ func TestPageAllocFree(t *testing.T) { } } for name, v := range tests { - v := v t.Run(name, func(t *testing.T) { b := NewPageAlloc(v.before, nil) defer FreePageAlloc(b) @@ -1028,7 +1024,6 @@ func TestPageAllocAllocAndFree(t *testing.T) { }, } for name, v := range tests { - v := v t.Run(name, func(t *testing.T) { b := NewPageAlloc(v.init, nil) defer FreePageAlloc(b) diff --git a/src/runtime/mpagecache_test.go b/src/runtime/mpagecache_test.go index 19b4e04807e..523a1c0b07f 100644 --- a/src/runtime/mpagecache_test.go +++ b/src/runtime/mpagecache_test.go @@ -164,7 +164,6 @@ func TestPageCacheAlloc(t *testing.T) { }, } for name, test := range tests { - test := test t.Run(name, func(t *testing.T) { c := test.cache for i, h := range test.hits { @@ -407,7 +406,6 @@ func TestPageAllocAllocToCache(t *testing.T) { } } for name, v := range tests { - v := v t.Run(name, func(t *testing.T) { b := NewPageAlloc(v.beforeAlloc, v.beforeScav) defer FreePageAlloc(b) diff --git a/src/runtime/mpallocbits_test.go b/src/runtime/mpallocbits_test.go index cf49f775078..755f423f960 100644 --- a/src/runtime/mpallocbits_test.go +++ b/src/runtime/mpallocbits_test.go @@ -200,7 +200,6 @@ func TestMallocBitsPopcntRange(t *testing.T) { } } for name, v := range tests { - v := v t.Run(name, func(t *testing.T) { b := makePallocBits(v.init) for _, h := range v.tests { @@ -291,7 +290,6 @@ func TestPallocBitsSummarize(t *testing.T) { }, } for name, v := range tests { - v := v t.Run(name, func(t *testing.T) { b := makePallocBits(v.free) // In the PallocBits we create 1's represent free spots, but in our actual @@ -436,7 +434,6 @@ func TestPallocBitsAlloc(t *testing.T) { } } for name, v := range tests { - v := v t.Run(name, func(t *testing.T) { b := makePallocBits(v.before) for iter, i := range v.hits { @@ -498,7 +495,6 @@ func TestPallocBitsFree(t *testing.T) { } } for name, v := range tests { - v := v t.Run(name, func(t *testing.T) { b := makePallocBits(v.beforeInv) invertPallocBits(b) diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go index 0957e67b50f..f1703a7ebab 100644 --- a/src/runtime/mprof.go +++ b/src/runtime/mprof.go @@ -444,7 +444,7 @@ func mProf_Malloc(mp *m, p unsafe.Pointer, size uintptr) { } // Only use the part of mp.profStack we need and ignore the extra space // reserved for delayed inline expansion with frame pointer unwinding. 
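The `v := v` and `test := test` copies removed throughout these tests existed only to give each t.Run closure its own copy of the loop variable. Since Go 1.22 every iteration declares a fresh variable, so the shadowing is dead code:

package main

import "fmt"

func main() {
	var prints []func()
	for _, v := range []int{1, 2, 3} {
		// No `v := v` needed: as of Go 1.22 each iteration gets its
		// own v, so each closure captures a distinct variable.
		prints = append(prints, func() { fmt.Println(v) })
	}
	for _, f := range prints {
		f() // 1, 2, 3 (before Go 1.22: 3, 3, 3)
	}
}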
- nstk := callers(5, mp.profStack[:debug.profstackdepth]) + nstk := callers(3, mp.profStack[:debug.profstackdepth+2]) index := (mProfCycle.read() + 2) % uint32(len(memRecord{}.future)) b := stkbucket(memProfile, size, mp.profStack[:nstk], true) @@ -1261,7 +1261,7 @@ func goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.P //go:linkname pprof_goroutineLeakProfileWithLabels func pprof_goroutineLeakProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) { - return goroutineLeakProfileWithLabelsConcurrent(p, labels) + return goroutineLeakProfileWithLabels(p, labels) } // labels may be nil. If labels is non-nil, it must have the same length as p. @@ -1323,30 +1323,26 @@ func goroutineLeakProfileWithLabelsConcurrent(p []profilerecord.StackRecord, lab return work.goroutineLeak.count, false } - // Use the same semaphore as goroutineProfileWithLabelsConcurrent, - // because ultimately we still use goroutine profiles. - semacquire(&goroutineProfile.sema) - - // Unlike in goroutineProfileWithLabelsConcurrent, we don't need to - // save the current goroutine stack, because it is obviously not leaked. - pcbuf := makeProfStack() // see saveg() for explanation // Prepare a profile large enough to store all leaked goroutines. n = work.goroutineLeak.count if n > len(p) { - // There's not enough space in p to store the whole profile, so (per the - // contract of runtime.GoroutineProfile) we're not allowed to write to p - // at all and must return n, false. - semrelease(&goroutineProfile.sema) + // There's not enough space in p to store the whole profile, so + // we're not allowed to write to p at all and must return n, false. return n, false } // Visit each leaked goroutine and try to record its stack. + var offset int forEachGRace(func(gp1 *g) { - if readgstatus(gp1) == _Gleaked { - doRecordGoroutineProfile(gp1, pcbuf) + if readgstatus(gp1)&^_Gscan == _Gleaked { + systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &p[offset], pcbuf) }) + if labels != nil { + labels[offset] = gp1.labels + } + offset++ } }) @@ -1354,7 +1350,6 @@ func goroutineLeakProfileWithLabelsConcurrent(p []profilerecord.StackRecord, lab raceacquire(unsafe.Pointer(&labelSync)) } - semrelease(&goroutineProfile.sema) return n, true } @@ -1454,7 +1449,7 @@ func goroutineProfileWithLabelsConcurrent(p []profilerecord.StackRecord, labels // were collecting the profile. But probably better to return a // truncated profile than to crash the whole process. // - // For instance, needm moves a goroutine out of the _Gdead state and so + // For instance, needm moves a goroutine out of the _Gdeadextra state and so // might be able to change the goroutine count without interacting with // the scheduler. For code like that, the race windows are small and the // combination of features is uncommon, so it's hard to be (and remain) @@ -1480,7 +1475,7 @@ func tryRecordGoroutineProfileWB(gp1 *g) { // in the current goroutine profile: either that it should not be profiled, or // that a snapshot of its call stack and labels are now in the profile. func tryRecordGoroutineProfile(gp1 *g, pcbuf []uintptr, yield func()) { - if readgstatus(gp1) == _Gdead { + if status := readgstatus(gp1); status == _Gdead || status == _Gdeadextra { // Dead goroutines should not appear in the profile. 
Goroutines that // start while profile collection is active will get goroutineProfiled // set to goroutineProfileSatisfied before transitioning out of _Gdead, @@ -1535,7 +1530,18 @@ func doRecordGoroutineProfile(gp1 *g, pcbuf []uintptr) { // everything else, we just don't record the stack in the profile. return } - if readgstatus(gp1) == _Grunning { + // Double-check that we didn't make a grave mistake. If the G is running then in + // general, we cannot safely read its stack. + // + // However, there is one case where it's OK. There's a small window of time in + // exitsyscall where a goroutine could be in _Grunning as it's exiting a syscall. + // This is OK because the goroutine will not exit the syscall until it passes through + // a call to tryRecordGoroutineProfile. (An explicit one on the fast path, an + // implicit one via the scheduler on the slow path.) + // + // This is also why it's safe to check syscallsp here. The syscall path mutates + // syscallsp only after passing through tryRecordGoroutineProfile. + if readgstatus(gp1) == _Grunning && gp1.syscallsp == 0 { print("doRecordGoroutineProfile gp1=", gp1.goid, "\n") throw("cannot read stack of running goroutine") } @@ -1570,7 +1576,16 @@ func goroutineProfileWithLabelsSync(p []profilerecord.StackRecord, labels []unsa isOK := func(gp1 *g) bool { // Checking isSystemGoroutine here makes GoroutineProfile // consistent with both NumGoroutine and Stack. - return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1, false) + if gp1 == gp { + return false + } + if status := readgstatus(gp1); status == _Gdead || status == _Gdeadextra { + return false + } + if isSystemGoroutine(gp1, false) { + return false + } + return true } pcbuf := makeProfStack() // see saveg() for explanation diff --git a/src/runtime/msan/msan.go b/src/runtime/msan/msan.go index 8d4471b816e..7a30581de77 100644 --- a/src/runtime/msan/msan.go +++ b/src/runtime/msan/msan.go @@ -13,8 +13,6 @@ package msan #include #include -extern void __msan_memmove(void*, const void*, uintptr_t); - void __msan_read_go(void *addr, uintptr_t sz) { __msan_check_mem_is_initialized(addr, sz); } @@ -32,7 +30,11 @@ void __msan_free_go(void *addr, uintptr_t sz) { } void __msan_memmove_go(void *to, const void *from, uintptr_t sz) { - __msan_memmove(to, from, sz); + // Note: don't use __msan_memmove, as it actually does + // the move. We do the move ourselves, so it isn't necessary. + // Also, it clobbers the target before we issue the write + // barrier, which causes pointers to get lost. See issue 76138. + __msan_copy_shadow(to, from, sz); } */ import "C" diff --git a/src/runtime/mspanset.go b/src/runtime/mspanset.go index 21b105194e4..68d2dd0d1ee 100644 --- a/src/runtime/mspanset.go +++ b/src/runtime/mspanset.go @@ -149,6 +149,11 @@ retry: // pop is safe to call concurrently with other pop and push operations. func (b *spanSet) pop() *mspan { var head, tail uint32 + var backoff uint32 + // TODO: tweak backoff parameters on other architectures. + if GOARCH == "arm64" { + backoff = 128 + } claimLoop: for { headtail := b.index.load() @@ -177,6 +182,14 @@ claimLoop: if b.index.cas(headtail, makeHeadTailIndex(want+1, tail)) { break claimLoop } + // Use a backoff approach to reduce demand on the shared memory location. + // This decreases memory contention and allows other threads to make quicker + // progress.
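The (n, false) overflow behavior preserved in the leak profile code above is the same contract the public runtime.GoroutineProfile API has: if the slice is too small, nothing is written and the caller retries with at least n entries. A typical caller:

package main

import (
	"fmt"
	"runtime"
)

func snapshot() []runtime.StackRecord {
	p := make([]runtime.StackRecord, 8)
	for {
		n, ok := runtime.GoroutineProfile(p)
		if ok {
			return p[:n]
		}
		// Too small: p was left untouched. Retry with headroom for
		// goroutines created in the meantime.
		p = make([]runtime.StackRecord, n+8)
	}
}

func main() {
	fmt.Println("goroutines in profile:", len(snapshot()))
}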
+ // Read more in this Arm blog post: + // https://community.arm.com/arm-community-blogs/b/architectures-and-processors-blog/posts/multi-threaded-applications-arm + procyield(backoff) + // Increase backoff time. + backoff += backoff / 2 headtail = b.index.load() head, tail = headtail.split() } @@ -407,7 +420,7 @@ func (p *atomicMSpanPointer) Load() *mspan { return (*mspan)(p.p.Load()) } -// Store stores an *mspan. +// StoreNoWB stores an *mspan. func (p *atomicMSpanPointer) StoreNoWB(s *mspan) { p.p.StoreNoWB(unsafe.Pointer(s)) } diff --git a/src/runtime/mstats.go b/src/runtime/mstats.go index e34f0b10ea0..febfb69a3a3 100644 --- a/src/runtime/mstats.go +++ b/src/runtime/mstats.go @@ -914,7 +914,7 @@ type cpuStats struct { ScavengeTotalTime int64 IdleTime int64 // Time Ps spent in _Pidle. - UserTime int64 // Time Ps spent in _Prunning or _Psyscall that's not any of the above. + UserTime int64 // Time Ps spent in _Prunning that's not any of the above. TotalTime int64 // GOMAXPROCS * (monotonic wall clock time elapsed) } @@ -976,7 +976,7 @@ func (s *cpuStats) accumulate(now int64, gcMarkPhase bool) { // Compute userTime. We compute this indirectly as everything that's not the above. // // Since time spent in _Pgcstop is covered by gcPauseTime, and time spent in _Pidle - // is covered by idleTime, what we're left with is time spent in _Prunning and _Psyscall, + // is covered by idleTime, what we're left with is time spent in _Prunning, // the latter of which is fine because the P will either go idle or get used for something // else via sysmon. Meanwhile if we subtract GC time from whatever's left, we get non-GC // _Prunning time. Note that this still leaves time spent in sweeping and in the scheduler, diff --git a/src/runtime/os_darwin.go b/src/runtime/os_darwin.go index ab8aa8037b9..79cd2997c7d 100644 --- a/src/runtime/os_darwin.go +++ b/src/runtime/os_darwin.go @@ -15,6 +15,11 @@ type mOS struct { mutex pthreadmutex cond pthreadcond count int + + // address of errno variable for this thread. + // This is an optimization to avoid calling libc_error + // on every syscall_rawsyscalln. + errnoAddr *int32 } func unimplemented(name string) { @@ -341,6 +346,7 @@ func minit() { } minitSignalMask() getg().m.procid = uint64(pthread_self()) + libc_error_addr(&getg().m.errnoAddr) } // Called from dropm to undo the effect of an minit. diff --git a/src/runtime/os_dragonfly.go b/src/runtime/os_dragonfly.go index fbbee64fd38..c34af7f072a 100644 --- a/src/runtime/os_dragonfly.go +++ b/src/runtime/os_dragonfly.go @@ -113,7 +113,7 @@ func futexsleep1(addr *uint32, val uint32, ns int64) { // The timeout is specified in microseconds - ensure that we // do not end up dividing to zero, which would put us to sleep // indefinitely... 
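A user-space analogue of the pop backoff above, with runtime.Gosched standing in for the unexported procyield and the same roughly 1.5x growth after each failed CAS:

package main

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
)

func take(counter *atomic.Int64) {
	backoff := 4
	for {
		old := counter.Load()
		if counter.CompareAndSwap(old, old-1) {
			return
		}
		// Contention: back off before re-reading the shared word so
		// other threads can make progress.
		for i := 0; i < backoff; i++ {
			runtime.Gosched()
		}
		backoff += backoff / 2
	}
}

func main() {
	var c atomic.Int64
	c.Store(800)
	var wg sync.WaitGroup
	for g := 0; g < 8; g++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 100; i++ {
				take(&c)
			}
		}()
	}
	wg.Wait()
	fmt.Println(c.Load()) // 0
}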
- timeout = timediv(ns, 1000, nil) + timeout = int32(ns / 1000) if timeout == 0 { timeout = 1 } diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go index 080dd965323..7e6af22d48a 100644 --- a/src/runtime/os_linux.go +++ b/src/runtime/os_linux.go @@ -8,8 +8,8 @@ import ( "internal/abi" "internal/goarch" "internal/runtime/atomic" - "internal/runtime/strconv" "internal/runtime/syscall/linux" + "internal/strconv" "unsafe" ) @@ -339,8 +339,8 @@ func getHugePageSize() uintptr { return 0 } n-- // remove trailing newline - v, ok := strconv.Atoi(slicebytetostringtmp((*byte)(ptr), int(n))) - if !ok || v < 0 { + v, err := strconv.Atoi(slicebytetostringtmp((*byte)(ptr), int(n))) + if err != nil || v < 0 { v = 0 } if v&(v-1) != 0 { @@ -435,9 +435,6 @@ func setitimer(mode int32, new, old *itimerval) //go:noescape func timer_create(clockid int32, sevp *sigevent, timerid *int32) int32 -//go:noescape -func timer_settime(timerid int32, flags int32, new, old *itimerspec) int32 - //go:noescape func timer_delete(timerid int32) int32 diff --git a/src/runtime/os_linux_futex32.go b/src/runtime/os_linux_futex32.go index fdf99e56693..c5cffa24d15 100644 --- a/src/runtime/os_linux_futex32.go +++ b/src/runtime/os_linux_futex32.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build linux && (386 || arm || mips || mipsle || ppc) +//go:build linux && (386 || arm || mips || mipsle || (gccgo && (ppc || s390))) package runtime diff --git a/src/runtime/os_linux_futex64.go b/src/runtime/os_linux_futex64.go index 487d0e03978..2448e30cf1b 100644 --- a/src/runtime/os_linux_futex64.go +++ b/src/runtime/os_linux_futex64.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build linux && !(386 || arm || mips || mipsle || ppc || s390) +//go:build linux && !(386 || arm || mips || mipsle || (gccgo && (ppc || s390))) package runtime diff --git a/src/runtime/os_linux_settime32.go b/src/runtime/os_linux_settime32.go new file mode 100644 index 00000000000..e6c5d9f95c0 --- /dev/null +++ b/src/runtime/os_linux_settime32.go @@ -0,0 +1,47 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build linux && (386 || arm || mips || mipsle || (gccgo && (ppc || s390))) + +package runtime + +import "internal/runtime/atomic" + +var timer32bitOnly atomic.Bool + +//go:noescape +func timer_settime32(timerid int32, flags int32, new, old *itimerspec32) int32 + +//go:noescape +func timer_settime64(timerid int32, flags int32, new, old *itimerspec) int32 + +//go:nosplit +func timer_settime(timerid int32, flags int32, new, old *itimerspec) int32 { + if !timer32bitOnly.Load() { + ret := timer_settime64(timerid, flags, new, old) + // timer_settime64 is only supported on Linux 5.0+ + if ret != -_ENOSYS { + return ret + } + timer32bitOnly.Store(true) + } + + var newts, oldts itimerspec32 + var new32, old32 *itimerspec32 + + if new != nil { + newts.it_interval.setNsec(new.it_interval.tv_sec*1e9 + new.it_interval.tv_nsec) + newts.it_value.setNsec(new.it_value.tv_sec*1e9 + new.it_value.tv_nsec) + new32 = &newts + } + + // old is an output parameter: the kernel writes the previous timer setting + // into it, so pass a 32-bit buffer and convert the result back afterwards. + if old != nil { + old32 = &oldts + } + + // Fall back to the 32-bit timer. + ret := timer_settime32(timerid, flags, new32, old32) + if ret == 0 && old != nil { + old.it_interval.setNsec(oldts.it_interval.tv_sec*1e9 + oldts.it_interval.tv_nsec) + old.it_value.setNsec(oldts.it_value.tv_sec*1e9 + oldts.it_value.tv_nsec) + } + return ret +} diff --git a/src/runtime/os_linux_settime64.go b/src/runtime/os_linux_settime64.go new file mode 100644 index 00000000000..dfacf5612d8 --- /dev/null +++ b/src/runtime/os_linux_settime64.go @@ -0,0 +1,10 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && !(386 || arm || mips || mipsle || (gccgo && (ppc || s390))) + +package runtime + +//go:noescape +func timer_settime(timerid int32, flags int32, new, old *itimerspec) int32 diff --git a/src/runtime/os_plan9.go b/src/runtime/os_plan9.go index 72a86579854..80c101f1a18 100644 --- a/src/runtime/os_plan9.go +++ b/src/runtime/os_plan9.go @@ -486,7 +486,7 @@ func semacreate(mp *m) { func semasleep(ns int64) int { gp := getg() if ns >= 0 { - ms := timediv(ns, 1000000, nil) + ms := int32(ns / 1000000) if ms == 0 { ms = 1 } diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go index 7610802e0f7..1688ba910e6 100644 --- a/src/runtime/os_windows.go +++ b/src/runtime/os_windows.go @@ -191,8 +191,8 @@ type mOS struct { // complete. // // TODO(austin): We may not need this if preemption were more - // tightly synchronized on the G/P status and preemption - // blocked transition into _Gsyscall/_Psyscall. + // tightly synchronized on the G status and preemption + // blocked transition into _Gsyscall. preemptExtLock uint32 } @@ -664,7 +664,7 @@ func semasleep(ns int64) int32 { start := nanotime() elapsed := int64(0) for { - ms := int64(timediv(ns-elapsed, 1000000, nil)) + ms := (ns - elapsed) / 1000000 if ms == 0 { ms = 1 } diff --git a/src/runtime/panic.go b/src/runtime/panic.go index d7bce70fe5a..e1105afd0fd 100644 --- a/src/runtime/panic.go +++ b/src/runtime/panic.go @@ -556,15 +556,13 @@ func deferprocStack(d *_defer) { d.sp = sys.GetCallerSP() d.pc = sys.GetCallerPC() // The lines below implement: - // d.panic = nil - // d.fd = nil // d.link = gp._defer // d.head = nil // gp._defer = d - // But without write barriers. The first three are writes to + // But without write barriers. The first two are writes to // the stack so they don't need a write barrier, and furthermore // are to uninitialized memory, so they must not use a write barrier.
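The timer32bitOnly dance above is a general fallback pattern: probe the modern syscall once, and on ENOSYS remember to use the legacy path from then on. A self-contained analogue (modernOp and legacyOp are stand-ins, not real runtime calls):

package main

import (
	"fmt"
	"sync/atomic"
	"syscall"
)

// legacyOnly caches the result of the first probe, like timer32bitOnly:
// once the modern call reports ENOSYS, never try it again.
var legacyOnly atomic.Bool

func modernOp() error { return syscall.ENOSYS } // stand-in: kernel too old
func legacyOp() error { return nil }

func doOp() error {
	if !legacyOnly.Load() {
		err := modernOp()
		if err != syscall.ENOSYS {
			return err
		}
		legacyOnly.Store(true)
	}
	return legacyOp()
}

func main() {
	fmt.Println(doOp(), legacyOnly.Load()) // <nil> true
}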
- // The fourth write does not require a write barrier because we + // The third write does not require a write barrier because we // explicitly mark all the defer structures, so we don't need to // keep track of pointers to them with a write barrier. *(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer)) @@ -1115,7 +1113,7 @@ func gorecover() any { // frame between gopanic and gorecover. // // We don't recover this: - // defer func() { func() { recover() }() } + // defer func() { func() { recover() }() }() // because there are 2 non-wrapper frames. // // We don't recover this: @@ -1244,10 +1242,12 @@ func throw(s string) { // //go:nosplit func fatal(s string) { + p := getg()._panic // Everything fatal does should be recursively nosplit so it // can be called even when it's unsafe to grow the stack. printlock() // Prevent multiple interleaved fatal reports. See issue 69447. systemstack(func() { + printPreFatalDeferPanic(p) print("fatal error: ") printindented(s) // logically printpanicval(s), but avoids convTstring write barrier print("\n") @@ -1257,6 +1257,27 @@ func fatal(s string) { printunlock() } +// printPreFatalDeferPanic prints the in-flight panic chain +// when fatal is reached while a panic is running deferred functions. +func printPreFatalDeferPanic(p *_panic) { + // Don't call preprintpanics, because we + // don't want to call String/Error on the panicked values. + // When we fatal, we want to just print and exit, + // not execute any more user Go code. + for x := p; x != nil; x = x.link { + if x.link != nil && *efaceOf(&x.link.arg) == *efaceOf(&x.arg) { + // This panic contains the same value as the next one in the chain. + // Mark it as repanicked. We will skip printing it twice in a row. + x.link.repanicked = true + } + } + if p != nil { + printpanics(p) + // Make fatal have the same indentation as non-first panics. + print("\t") + } +} + // runningPanicDefers is non-zero while running deferred functions for panic. // This is used to try hard to get a panic stack trace out when exiting. var runningPanicDefers atomic.Uint32 diff --git a/src/runtime/panic32.go b/src/runtime/panic32.go index 9dd4c0eb2e8..7abc9f595bd 100644 --- a/src/runtime/panic32.go +++ b/src/runtime/panic32.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file.
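The new printPreFatalDeferPanic path fires in programs like the following, where a deferred function throws a fatal error while a panic is unwinding; with this change the pending panic value is printed above the fatal error instead of being dropped. This sketch should report the nil-dereference panic followed by "fatal error: sync: unlock of unlocked mutex":

package main

import "sync"

func main() {
	var mu sync.Mutex
	// Runs while the nil-dereference panic below is unwinding;
	// unlocking an unlocked Mutex throws a fatal error.
	defer mu.Unlock()
	var p *int
	println(*p)
}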
-//go:build 386 || arm || mips || mipsle +//go:build 386 || arm || mips || mipsle || (gccgo && (ppc || s390)) package runtime diff --git a/src/runtime/panic_test.go b/src/runtime/panic_test.go index 994abfdd455..2b06bce45d2 100644 --- a/src/runtime/panic_test.go +++ b/src/runtime/panic_test.go @@ -18,10 +18,10 @@ func TestPanicWithDirectlyPrintableCustomTypes(t *testing.T) { wantPanicPrefix string }{ {"panicCustomBool", `panic: main.MyBool(true)`}, - {"panicCustomComplex128", `panic: main.MyComplex128(+3.210000e+001+1.000000e+001i)`}, - {"panicCustomComplex64", `panic: main.MyComplex64(+1.100000e-001+3.000000e+000i)`}, - {"panicCustomFloat32", `panic: main.MyFloat32(-9.370000e+001)`}, - {"panicCustomFloat64", `panic: main.MyFloat64(-9.370000e+001)`}, + {"panicCustomComplex128", `panic: main.MyComplex128(32.1+10i)`}, + {"panicCustomComplex64", `panic: main.MyComplex64(0.11+3i)`}, + {"panicCustomFloat32", `panic: main.MyFloat32(-93.7)`}, + {"panicCustomFloat64", `panic: main.MyFloat64(-93.7)`}, {"panicCustomInt", `panic: main.MyInt(93)`}, {"panicCustomInt8", `panic: main.MyInt8(93)`}, {"panicCustomInt16", `panic: main.MyInt16(93)`}, @@ -34,6 +34,8 @@ func TestPanicWithDirectlyPrintableCustomTypes(t *testing.T) { {"panicCustomUint32", `panic: main.MyUint32(93)`}, {"panicCustomUint64", `panic: main.MyUint64(93)`}, {"panicCustomUintptr", `panic: main.MyUintptr(93)`}, + {"panicDeferFatal", "panic: runtime.errorString(\"invalid memory address or nil pointer dereference\")\n\tfatal error: sync: unlock of unlocked mutex"}, + {"panicDoublieDeferFatal", "panic: runtime.errorString(\"invalid memory address or nil pointer dereference\") [recovered, repanicked]\n\tfatal error: sync: unlock of unlocked mutex"}, } for _, tt := range tests { diff --git a/src/runtime/pinner.go b/src/runtime/pinner.go index 424dd065efd..dad14a4d09c 100644 --- a/src/runtime/pinner.go +++ b/src/runtime/pinner.go @@ -143,8 +143,8 @@ func isPinned(ptr unsafe.Pointer) bool { } // setPinned marks or unmarks a Go pointer as pinned, when the ptr is a Go pointer. -// It will be ignored while try to pin a non-Go pointer, -// and it will be panic while try to unpin a non-Go pointer, +// It will be ignored while trying to pin a non-Go pointer. +// It will panic while trying to unpin a non-Go pointer, // which should not happen in normal usage. func setPinned(ptr unsafe.Pointer, pin bool) bool { span := spanOfHeap(uintptr(ptr)) diff --git a/src/runtime/pprof/mprof_test.go b/src/runtime/pprof/mprof_test.go index 7c4a37e3c99..456ba904449 100644 --- a/src/runtime/pprof/mprof_test.go +++ b/src/runtime/pprof/mprof_test.go @@ -97,25 +97,25 @@ func TestMemoryProfiler(t *testing.T) { legacy string }{{ stk: []string{"runtime/pprof.allocatePersistent1K", "runtime/pprof.TestMemoryProfiler"}, - legacy: fmt.Sprintf(`%v: %v \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ + legacy: fmt.Sprintf(`%v: %v \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+( 0x[0-9,a-f]+ 0x[0-9,a-f]+)? 
# 0x[0-9,a-f]+ runtime/pprof\.allocatePersistent1K\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test\.go:48 # 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test\.go:87 `, 32*memoryProfilerRun, 1024*memoryProfilerRun, 32*memoryProfilerRun, 1024*memoryProfilerRun), }, { stk: []string{"runtime/pprof.allocateTransient1M", "runtime/pprof.TestMemoryProfiler"}, - legacy: fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ + legacy: fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ # 0x[0-9,a-f]+ runtime/pprof\.allocateTransient1M\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:25 # 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:84 `, (1<<10)*memoryProfilerRun, (1<<20)*memoryProfilerRun), }, { stk: []string{"runtime/pprof.allocateTransient2M", "runtime/pprof.TestMemoryProfiler"}, - legacy: fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ + legacy: fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ # 0x[0-9,a-f]+ runtime/pprof\.allocateTransient2M\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:31 # 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:85 `, memoryProfilerRun, (2<<20)*memoryProfilerRun), }, { stk: []string{"runtime/pprof.allocateTransient2MInline", "runtime/pprof.TestMemoryProfiler"}, - legacy: fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ + legacy: fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ # 0x[0-9,a-f]+ runtime/pprof\.allocateTransient2MInline\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:35 # 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:86 `, memoryProfilerRun, (2<<20)*memoryProfilerRun), diff --git a/src/runtime/pprof/pprof.go b/src/runtime/pprof/pprof.go index b524e992b8b..78d00af6cac 100644 --- a/src/runtime/pprof/pprof.go +++ b/src/runtime/pprof/pprof.go @@ -228,6 +228,31 @@ var mutexProfile = &Profile{ write: writeMutex, } +// goroutineLeakProfileLock ensures that the goroutine leak profile writer observes the +// leaked goroutines discovered during the goroutine leak detection GC cycle +// that was triggered by the profile request. +// This prevents a race condition between the garbage collector and the profile writer +// when multiple profile requests are issued concurrently: the status of leaked goroutines +// is reset to _Gwaiting at the beginning of a leak detection cycle, which may lead the +// profile writer of another concurrent request to produce an incomplete profile. +// +// Example trace: +// +// G1 | GC | G2 +// ----------------------+-----------------------------+--------------------- +// Request profile | . | . +// . | . | Request profile +// . | [G1] Resets leaked g status | . +// . | [G1] Leaks detected | . +// . | | . +// . | [G2] Resets leaked g status | . +// Write profile | . | . +// . | [G2] Leaks detected | . +// . | . 
| Write profile +// ----------------------+-----------------------------+--------------------- +// Incomplete profile |+++++++++++++++++++++++++++++| Complete profile +var goroutineLeakProfileLock sync.Mutex + func lockProfiles() { profiles.mu.Lock() if profiles.m == nil { @@ -763,6 +788,15 @@ func writeGoroutine(w io.Writer, debug int) error { // writeGoroutineLeak first invokes a GC cycle that performs goroutine leak detection. // It then writes the goroutine profile, filtering for leaked goroutines. func writeGoroutineLeak(w io.Writer, debug int) error { + // Acquire the goroutine leak detection lock and release + // it after the goroutine leak profile is written. + // + // While the critical section is long, this is needed to prevent + // a race condition between the garbage collector and the goroutine + // leak profile writer when multiple profile requests are issued concurrently. + goroutineLeakProfileLock.Lock() + defer goroutineLeakProfileLock.Unlock() + // Run the GC with leak detection first so that leaked goroutines // may transition to the leaked state. runtime_goroutineLeakGC() diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go index 25a2f3b3241..4c9279c5a6f 100644 --- a/src/runtime/pprof/pprof_test.go +++ b/src/runtime/pprof/pprof_test.go @@ -344,6 +344,11 @@ func (h inlineWrapper) dump(pcs []uintptr) { func inlinedWrapperCallerDump(pcs []uintptr) { var h inlineWrapperInterface + + // Take the address of h, such that h.dump() call (below) + // does not get devirtualized by the compiler. + _ = &h + h = &inlineWrapper{} h.dump(pcs) } @@ -769,7 +774,11 @@ func TestMorestack(t *testing.T) { for { go func() { growstack1() - c <- true + // NOTE(vsaioc): This goroutine may leak without this select. + select { + case c <- true: + case <-time.After(duration): + } }() select { case <-t: @@ -1560,6 +1569,203 @@ func containsCountsLabels(prof *profile.Profile, countLabels map[int64]map[strin return true } +// Inlining disabled to make identification simpler. +// +//go:noinline +func goroutineLeakExample() { + <-make(chan struct{}) + panic("unreachable") +} + +func TestGoroutineLeakProfileConcurrency(t *testing.T) { + const leakCount = 3 + + testenv.MustHaveParallelism(t) + regexLeakCount := regexp.MustCompile("goroutineleak profile: total ") + whiteSpace := regexp.MustCompile("\\s+") + + // Regular goroutine profile. Used to check that there is no interference between + // the two profile types. 
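The TestMorestack fix above replaces a bare c <- true with a select so the sender cannot block forever once the receiver has moved on. The same non-leaking send pattern in isolation (the timeout value here is arbitrary):

package main

import (
	"fmt"
	"time"
)

func main() {
	c := make(chan bool) // unbuffered: a send blocks until someone receives
	done := make(chan struct{})
	go func() {
		defer close(done)
		// Without the timeout case this goroutine would block on the
		// send forever once the receiver has given up.
		select {
		case c <- true:
		case <-time.After(100 * time.Millisecond):
		}
	}()
	<-done // the sender exits via the timeout instead of leaking
	fmt.Println("sender finished")
}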
+ goroutineProf := Lookup("goroutine") + goroutineLeakProf := goroutineLeakProfile + + // Check that a profile with debug information contains the expected leaked goroutine. + includesLeak := func(t *testing.T, name, s string) { + if !strings.Contains(s, "runtime/pprof.goroutineLeakExample") { + t.Errorf("%s profile does not contain expected leaked goroutine (runtime/pprof.goroutineLeakExample): %s", name, s) + } + } + + checkFrame := func(i int, j int, locations []*profile.Location, expectedFunctionName string) { + if len(locations) <= i { + t.Errorf("leaked goroutine stack locations: out of range index %d, length %d", i, len(locations)) + return + } + location := locations[i] + if len(location.Line) <= j { + t.Errorf("leaked goroutine stack location lines: out of range index %d, length %d", j, len(location.Line)) + return + } + if location.Line[j].Function.Name != expectedFunctionName { + t.Errorf("leaked goroutine stack expected %s as the location[%d].Line[%d] but found %s (%s:%d)", expectedFunctionName, i, j, location.Line[j].Function.Name, location.Line[j].Function.Filename, location.Line[j].Line) + } + } + + // We use this helper to count the total number of leaked goroutines in the profile. + // + // NOTE(vsaioc): This value should match the number of leaks produced in this test, + // but other tests could also leak goroutines, in which case we would have a mismatch + // when bulk-running tests. + // + // The two mismatching outcomes are therefore: + // - More leaks than expected, which is a correctness issue with other tests. + // In this case, this test effectively checks other tests with respect to + // goroutine leaks during bulk executions (e.g., running all.bash). + // + // - Fewer leaks than expected; this is an unfortunate symptom of scheduling + // non-determinism, which may occur once in a blue moon. We make + // a best-effort attempt to allow the expected leaks to occur, by yielding + // the main thread, but it is never a guarantee. + countLeaks := func(t *testing.T, number int, s string) { + // Strip the profile header. + parts := regexLeakCount.Split(s, -1) + if len(parts) < 2 { + t.Fatalf("goroutineleak profile does not contain 'goroutineleak profile: total ': %s\nparts: %v", s, parts) + return + } + + parts = whiteSpace.Split(parts[1], -1) + + count, err := strconv.ParseInt(parts[0], 10, 64) + if err != nil { + t.Fatalf("goroutineleak profile count is not a number: %s\nerror: %v", s, err) + } + + // Check that the total number of leaked goroutines is exactly the expected number. + if count != int64(number) { + t.Errorf("goroutineleak profile does not contain exactly %d leaked goroutines: %d", number, count) + } + } + + checkLeakStack := func(t *testing.T) func(pc uintptr, locations []*profile.Location, _ map[string][]string) { + return func(pc uintptr, locations []*profile.Location, _ map[string][]string) { + if pc != leakCount { + t.Errorf("expected %d leaked goroutines with specific stack configurations, but found %d", leakCount, pc) + return + } + if len(locations) < 4 || len(locations) > 5 { + message := fmt.Sprintf("leaked goroutine stack expected 4 or 5 locations but found %d", len(locations)) + for _, location := range locations { + for _, line := range location.Line { + message += fmt.Sprintf("\n%s:%d", line.Function.Name, line.Line) + } + } + t.Errorf("%s", message) + return + } + // We expect a receive operation. This is the typical stack.
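countLeaks above recovers the total by splitting on the profile header and parsing the first whitespace-delimited token. The same extraction outside the test harness, as a rough sketch (the sample input string is made up):

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var (
	header = regexp.MustCompile("goroutineleak profile: total ")
	ws     = regexp.MustCompile(`\s+`)
)

// leakCount extracts N from "goroutineleak profile: total N ...",
// mirroring the split-then-parse steps in countLeaks.
func leakCount(s string) (int64, error) {
	parts := header.Split(s, -1)
	if len(parts) < 2 {
		return 0, fmt.Errorf("missing profile header")
	}
	fields := ws.Split(parts[1], -1)
	return strconv.ParseInt(fields[0], 10, 64)
}

func main() {
	n, err := leakCount("goroutineleak profile: total 6\n1 @ 0x1234\n")
	fmt.Println(n, err) // 6 <nil>
}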
+ checkFrame(0, 0, locations, "runtime.gopark") + checkFrame(1, 0, locations, "runtime.chanrecv") + checkFrame(2, 0, locations, "runtime.chanrecv1") + checkFrame(3, 0, locations, "runtime/pprof.goroutineLeakExample") + if len(locations) == 5 { + checkFrame(4, 0, locations, "runtime/pprof.TestGoroutineLeakProfileConcurrency.func5") + } + } + } + // Leak some goroutines that will feature in the goroutine leak profile + for i := 0; i < leakCount; i++ { + go goroutineLeakExample() + go func() { + // Leak another goroutine that will feature a slightly different stack. + // This includes the frame runtime/pprof.TestGoroutineLeakProfileConcurrency.func1. + goroutineLeakExample() + panic("unreachable") + }() + // Yield several times to allow the goroutines to leak. + runtime.Gosched() + runtime.Gosched() + } + + // Give all goroutines a chance to leak. + time.Sleep(time.Second) + + t.Run("profile contains leak", func(t *testing.T) { + var w strings.Builder + goroutineLeakProf.WriteTo(&w, 0) + parseProfile(t, []byte(w.String()), checkLeakStack(t)) + }) + + t.Run("leak persists between sequential profiling runs", func(t *testing.T) { + for i := 0; i < 2; i++ { + var w strings.Builder + goroutineLeakProf.WriteTo(&w, 0) + parseProfile(t, []byte(w.String()), checkLeakStack(t)) + } + }) + + // Concurrent calls to the goroutine leak profiler should not trigger data races + // or corruption. + t.Run("overlapping profile requests", func(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + wg.Add(1) + Do(ctx, Labels("i", fmt.Sprint(i)), func(context.Context) { + go func() { + defer wg.Done() + for ctx.Err() == nil { + var w strings.Builder + goroutineLeakProf.WriteTo(&w, 1) + countLeaks(t, 2*leakCount, w.String()) + includesLeak(t, "goroutineleak", w.String()) + } + }() + }) + } + wg.Wait() + }) + + // Concurrent calls to the goroutine leak profiler should not trigger data races + // or corruption, or interfere with regular goroutine profiles. + t.Run("overlapping goroutine and goroutine leak profile requests", func(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + wg.Add(2) + Do(ctx, Labels("i", fmt.Sprint(i)), func(context.Context) { + go func() { + defer wg.Done() + for ctx.Err() == nil { + var w strings.Builder + goroutineLeakProf.WriteTo(&w, 1) + countLeaks(t, 2*leakCount, w.String()) + includesLeak(t, "goroutineleak", w.String()) + } + }() + go func() { + defer wg.Done() + for ctx.Err() == nil { + var w strings.Builder + goroutineProf.WriteTo(&w, 1) + // The regular goroutine profile should see the leaked + // goroutines. We simply check that the goroutine leak + // profile does not corrupt the goroutine profile state. 
+ includesLeak(t, "goroutine", w.String()) + } + }() + }) + } + wg.Wait() + }) +} + func TestGoroutineProfileConcurrency(t *testing.T) { testenv.MustHaveParallelism(t) @@ -1622,7 +1828,7 @@ func TestGoroutineProfileConcurrency(t *testing.T) { obj := new(T) ch1, ch2 := make(chan int), make(chan int) defer close(ch2) - runtime.SetFinalizer(obj, func(_ interface{}) { + runtime.SetFinalizer(obj, func(_ any) { close(ch1) <-ch2 }) @@ -1824,7 +2030,7 @@ func TestGoroutineProfileIssue74090(t *testing.T) { var objs []*T for range 10000 { obj := new(T) - runtime.SetFinalizer(obj, func(_ interface{}) {}) + runtime.SetFinalizer(obj, func(_ any) {}) objs = append(objs, obj) } objs = nil @@ -2578,9 +2784,10 @@ func TestProfilerStackDepth(t *testing.T) { t.Logf("matched stack=%s", stk) if len(stk) != depth { t.Errorf("want stack depth = %d, got %d", depth, len(stk)) + continue } - if rootFn, wantFn := stk[depth-1], "runtime/pprof.produceProfileEvents"; rootFn != wantFn { + if rootFn, wantFn := stk[depth-1], "runtime/pprof.allocDeep"; rootFn != wantFn { t.Errorf("want stack stack root %s, got %v", wantFn, rootFn) } } diff --git a/src/runtime/pprof/proto_windows.go b/src/runtime/pprof/proto_windows.go index f4dc44bd078..3118e8911e2 100644 --- a/src/runtime/pprof/proto_windows.go +++ b/src/runtime/pprof/proto_windows.go @@ -67,8 +67,7 @@ func readMainModuleMapping() (start, end uint64, exe, buildID string, err error) func createModuleSnapshot() (syscall.Handle, error) { for { snap, err := syscall.CreateToolhelp32Snapshot(windows.TH32CS_SNAPMODULE|windows.TH32CS_SNAPMODULE32, uint32(syscall.Getpid())) - var errno syscall.Errno - if err != nil && errors.As(err, &errno) && errno == windows.ERROR_BAD_LENGTH { + if errno, ok := errors.AsType[syscall.Errno](err); ok && errno == windows.ERROR_BAD_LENGTH { // When CreateToolhelp32Snapshot(SNAPMODULE|SNAPMODULE32, ...) fails // with ERROR_BAD_LENGTH then it should be retried until it succeeds. continue diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go index 5367f662138..447c7399fcb 100644 --- a/src/runtime/preempt.go +++ b/src/runtime/preempt.go @@ -134,7 +134,7 @@ func suspendG(gp *g) suspendGState { dumpgstatus(gp) throw("invalid g status") - case _Gdead: + case _Gdead, _Gdeadextra: // Nothing to suspend. // // preemptStop may need to be cleared, but @@ -286,7 +286,7 @@ func resumeG(state suspendGState) { // //go:nosplit func canPreemptM(mp *m) bool { - return mp.locks == 0 && mp.mallocing == 0 && mp.preemptoff == "" && mp.p.ptr().status == _Prunning + return mp.locks == 0 && mp.mallocing == 0 && mp.preemptoff == "" && mp.p.ptr().status == _Prunning && mp.curg != nil && readgstatus(mp.curg)&^_Gscan != _Gsyscall } //go:generate go run mkpreempt.go diff --git a/src/runtime/preempt_arm64.go b/src/runtime/preempt_arm64.go new file mode 100644 index 00000000000..1b71d2713ea --- /dev/null +++ b/src/runtime/preempt_arm64.go @@ -0,0 +1,38 @@ +// Code generated by mkpreempt.go; DO NOT EDIT. 
+ +package runtime + +type xRegs struct { + V0 [16]byte + V1 [16]byte + V2 [16]byte + V3 [16]byte + V4 [16]byte + V5 [16]byte + V6 [16]byte + V7 [16]byte + V8 [16]byte + V9 [16]byte + V10 [16]byte + V11 [16]byte + V12 [16]byte + V13 [16]byte + V14 [16]byte + V15 [16]byte + V16 [16]byte + V17 [16]byte + V18 [16]byte + V19 [16]byte + V20 [16]byte + V21 [16]byte + V22 [16]byte + V23 [16]byte + V24 [16]byte + V25 [16]byte + V26 [16]byte + V27 [16]byte + V28 [16]byte + V29 [16]byte + V30 [16]byte + V31 [16]byte +} diff --git a/src/runtime/preempt_arm64.s b/src/runtime/preempt_arm64.s index 31ec9d940f7..9017d881597 100644 --- a/src/runtime/preempt_arm64.s +++ b/src/runtime/preempt_arm64.s @@ -4,13 +4,14 @@ #include "textflag.h" TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 - MOVD R30, -496(RSP) - SUB $496, RSP + MOVD R30, -240(RSP) + SUB $240, RSP MOVD R29, -8(RSP) SUB $8, RSP, R29 #ifdef GOOS_ios MOVD R30, (RSP) #endif + // Save GPs STP (R0, R1), 8(RSP) STP (R2, R3), 24(RSP) STP (R4, R5), 40(RSP) @@ -28,39 +29,32 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVD R0, 216(RSP) MOVD FPSR, R0 MOVD R0, 224(RSP) - FSTPD (F0, F1), 232(RSP) - FSTPD (F2, F3), 248(RSP) - FSTPD (F4, F5), 264(RSP) - FSTPD (F6, F7), 280(RSP) - FSTPD (F8, F9), 296(RSP) - FSTPD (F10, F11), 312(RSP) - FSTPD (F12, F13), 328(RSP) - FSTPD (F14, F15), 344(RSP) - FSTPD (F16, F17), 360(RSP) - FSTPD (F18, F19), 376(RSP) - FSTPD (F20, F21), 392(RSP) - FSTPD (F22, F23), 408(RSP) - FSTPD (F24, F25), 424(RSP) - FSTPD (F26, F27), 440(RSP) - FSTPD (F28, F29), 456(RSP) - FSTPD (F30, F31), 472(RSP) + // Save extended register state to p.xRegs.scratch + MOVD g_m(g), R0 + MOVD m_p(R0), R0 + ADD $(p_xRegs+xRegPerP_scratch), R0, R0 + VST1.P [V0.B16, V1.B16, V2.B16, V3.B16], 64(R0) + VST1.P [V4.B16, V5.B16, V6.B16, V7.B16], 64(R0) + VST1.P [V8.B16, V9.B16, V10.B16, V11.B16], 64(R0) + VST1.P [V12.B16, V13.B16, V14.B16, V15.B16], 64(R0) + VST1.P [V16.B16, V17.B16, V18.B16, V19.B16], 64(R0) + VST1.P [V20.B16, V21.B16, V22.B16, V23.B16], 64(R0) + VST1.P [V24.B16, V25.B16, V26.B16, V27.B16], 64(R0) + VST1.P [V28.B16, V29.B16, V30.B16, V31.B16], 64(R0) CALL ·asyncPreempt2(SB) - FLDPD 472(RSP), (F30, F31) - FLDPD 456(RSP), (F28, F29) - FLDPD 440(RSP), (F26, F27) - FLDPD 424(RSP), (F24, F25) - FLDPD 408(RSP), (F22, F23) - FLDPD 392(RSP), (F20, F21) - FLDPD 376(RSP), (F18, F19) - FLDPD 360(RSP), (F16, F17) - FLDPD 344(RSP), (F14, F15) - FLDPD 328(RSP), (F12, F13) - FLDPD 312(RSP), (F10, F11) - FLDPD 296(RSP), (F8, F9) - FLDPD 280(RSP), (F6, F7) - FLDPD 264(RSP), (F4, F5) - FLDPD 248(RSP), (F2, F3) - FLDPD 232(RSP), (F0, F1) + // Restore non-GPs from *p.xRegs.cache + MOVD g_m(g), R0 + MOVD m_p(R0), R0 + MOVD (p_xRegs+xRegPerP_cache)(R0), R0 + VLD1.P 64(R0), [V0.B16, V1.B16, V2.B16, V3.B16] + VLD1.P 64(R0), [V4.B16, V5.B16, V6.B16, V7.B16] + VLD1.P 64(R0), [V8.B16, V9.B16, V10.B16, V11.B16] + VLD1.P 64(R0), [V12.B16, V13.B16, V14.B16, V15.B16] + VLD1.P 64(R0), [V16.B16, V17.B16, V18.B16, V19.B16] + VLD1.P 64(R0), [V20.B16, V21.B16, V22.B16, V23.B16] + VLD1.P 64(R0), [V24.B16, V25.B16, V26.B16, V27.B16] + VLD1.P 64(R0), [V28.B16, V29.B16, V30.B16, V31.B16] + // Restore GPs MOVD 224(RSP), R0 MOVD R0, FPSR MOVD 216(RSP), R0 @@ -78,8 +72,8 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 LDP 40(RSP), (R4, R5) LDP 24(RSP), (R2, R3) LDP 8(RSP), (R0, R1) - MOVD 496(RSP), R30 + MOVD 240(RSP), R30 MOVD -8(RSP), R29 MOVD (RSP), R27 - ADD $512, RSP + ADD $256, RSP RET (R27) diff --git a/src/runtime/preempt_loong64.go b/src/runtime/preempt_loong64.go new file mode 100644 
index 00000000000..c7fec338f26 --- /dev/null +++ b/src/runtime/preempt_loong64.go @@ -0,0 +1,38 @@ +// Code generated by mkpreempt.go; DO NOT EDIT. + +package runtime + +type xRegs struct { + X0 [32]byte + X1 [32]byte + X2 [32]byte + X3 [32]byte + X4 [32]byte + X5 [32]byte + X6 [32]byte + X7 [32]byte + X8 [32]byte + X9 [32]byte + X10 [32]byte + X11 [32]byte + X12 [32]byte + X13 [32]byte + X14 [32]byte + X15 [32]byte + X16 [32]byte + X17 [32]byte + X18 [32]byte + X19 [32]byte + X20 [32]byte + X21 [32]byte + X22 [32]byte + X23 [32]byte + X24 [32]byte + X25 [32]byte + X26 [32]byte + X27 [32]byte + X28 [32]byte + X29 [32]byte + X30 [32]byte + X31 [32]byte +} diff --git a/src/runtime/preempt_loong64.s b/src/runtime/preempt_loong64.s index 626dc4b6f67..4bc7ea3947b 100644 --- a/src/runtime/preempt_loong64.s +++ b/src/runtime/preempt_loong64.s @@ -4,8 +4,9 @@ #include "textflag.h" TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 - MOVV R1, -480(R3) - SUBV $480, R3 + MOVV R1, -224(R3) + SUBV $224, R3 + // Save GPs MOVV R4, 8(R3) MOVV R5, 16(R3) MOVV R6, 24(R3) @@ -32,38 +33,6 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVV R28, 192(R3) MOVV R29, 200(R3) MOVV R31, 208(R3) - MOVD F0, 216(R3) - MOVD F1, 224(R3) - MOVD F2, 232(R3) - MOVD F3, 240(R3) - MOVD F4, 248(R3) - MOVD F5, 256(R3) - MOVD F6, 264(R3) - MOVD F7, 272(R3) - MOVD F8, 280(R3) - MOVD F9, 288(R3) - MOVD F10, 296(R3) - MOVD F11, 304(R3) - MOVD F12, 312(R3) - MOVD F13, 320(R3) - MOVD F14, 328(R3) - MOVD F15, 336(R3) - MOVD F16, 344(R3) - MOVD F17, 352(R3) - MOVD F18, 360(R3) - MOVD F19, 368(R3) - MOVD F20, 376(R3) - MOVD F21, 384(R3) - MOVD F22, 392(R3) - MOVD F23, 400(R3) - MOVD F24, 408(R3) - MOVD F25, 416(R3) - MOVD F26, 424(R3) - MOVD F27, 432(R3) - MOVD F28, 440(R3) - MOVD F29, 448(R3) - MOVD F30, 456(R3) - MOVD F31, 464(R3) MOVV FCC0, R4 BSTRINSV $7, R4, $0, R5 MOVV FCC1, R4 @@ -80,9 +49,230 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 BSTRINSV $55, R4, $48, R5 MOVV FCC7, R4 BSTRINSV $63, R4, $56, R5 - MOVV R5, 472(R3) + MOVV R5, 216(R3) + // Save extended register state to p.xRegs.scratch + MOVV g_m(g), R4 + MOVV m_p(R4), R4 + ADDV $(p_xRegs+xRegPerP_scratch), R4, R4 + MOVBU internal∕cpu·Loong64+const_offsetLOONG64HasLASX(SB), R5 + BNE R5, saveLASX + MOVBU internal∕cpu·Loong64+const_offsetLOONG64HasLSX(SB), R5 + BNE R5, saveLSX +saveFP: + MOVD F0, 0(R4) + MOVD F1, 32(R4) + MOVD F2, 64(R4) + MOVD F3, 96(R4) + MOVD F4, 128(R4) + MOVD F5, 160(R4) + MOVD F6, 192(R4) + MOVD F7, 224(R4) + MOVD F8, 256(R4) + MOVD F9, 288(R4) + MOVD F10, 320(R4) + MOVD F11, 352(R4) + MOVD F12, 384(R4) + MOVD F13, 416(R4) + MOVD F14, 448(R4) + MOVD F15, 480(R4) + MOVD F16, 512(R4) + MOVD F17, 544(R4) + MOVD F18, 576(R4) + MOVD F19, 608(R4) + MOVD F20, 640(R4) + MOVD F21, 672(R4) + MOVD F22, 704(R4) + MOVD F23, 736(R4) + MOVD F24, 768(R4) + MOVD F25, 800(R4) + MOVD F26, 832(R4) + MOVD F27, 864(R4) + MOVD F28, 896(R4) + MOVD F29, 928(R4) + MOVD F30, 960(R4) + MOVD F31, 992(R4) + JMP preempt +saveLSX: + VMOVQ V0, 0(R4) + VMOVQ V1, 32(R4) + VMOVQ V2, 64(R4) + VMOVQ V3, 96(R4) + VMOVQ V4, 128(R4) + VMOVQ V5, 160(R4) + VMOVQ V6, 192(R4) + VMOVQ V7, 224(R4) + VMOVQ V8, 256(R4) + VMOVQ V9, 288(R4) + VMOVQ V10, 320(R4) + VMOVQ V11, 352(R4) + VMOVQ V12, 384(R4) + VMOVQ V13, 416(R4) + VMOVQ V14, 448(R4) + VMOVQ V15, 480(R4) + VMOVQ V16, 512(R4) + VMOVQ V17, 544(R4) + VMOVQ V18, 576(R4) + VMOVQ V19, 608(R4) + VMOVQ V20, 640(R4) + VMOVQ V21, 672(R4) + VMOVQ V22, 704(R4) + VMOVQ V23, 736(R4) + VMOVQ V24, 768(R4) + VMOVQ V25, 800(R4) + VMOVQ V26, 832(R4) + VMOVQ V27, 
864(R4) + VMOVQ V28, 896(R4) + VMOVQ V29, 928(R4) + VMOVQ V30, 960(R4) + VMOVQ V31, 992(R4) + JMP preempt +saveLASX: + XVMOVQ X0, 0(R4) + XVMOVQ X1, 32(R4) + XVMOVQ X2, 64(R4) + XVMOVQ X3, 96(R4) + XVMOVQ X4, 128(R4) + XVMOVQ X5, 160(R4) + XVMOVQ X6, 192(R4) + XVMOVQ X7, 224(R4) + XVMOVQ X8, 256(R4) + XVMOVQ X9, 288(R4) + XVMOVQ X10, 320(R4) + XVMOVQ X11, 352(R4) + XVMOVQ X12, 384(R4) + XVMOVQ X13, 416(R4) + XVMOVQ X14, 448(R4) + XVMOVQ X15, 480(R4) + XVMOVQ X16, 512(R4) + XVMOVQ X17, 544(R4) + XVMOVQ X18, 576(R4) + XVMOVQ X19, 608(R4) + XVMOVQ X20, 640(R4) + XVMOVQ X21, 672(R4) + XVMOVQ X22, 704(R4) + XVMOVQ X23, 736(R4) + XVMOVQ X24, 768(R4) + XVMOVQ X25, 800(R4) + XVMOVQ X26, 832(R4) + XVMOVQ X27, 864(R4) + XVMOVQ X28, 896(R4) + XVMOVQ X29, 928(R4) + XVMOVQ X30, 960(R4) + XVMOVQ X31, 992(R4) +preempt: CALL ·asyncPreempt2(SB) - MOVV 472(R3), R5 + // Restore non-GPs from *p.xRegs.cache + MOVV g_m(g), R4 + MOVV m_p(R4), R4 + MOVV (p_xRegs+xRegPerP_cache)(R4), R4 + MOVBU internal∕cpu·Loong64+const_offsetLOONG64HasLASX(SB), R5 + BNE R5, restoreLASX + MOVBU internal∕cpu·Loong64+const_offsetLOONG64HasLSX(SB), R5 + BNE R5, restoreLSX +restoreFP: + MOVD 992(R4), F31 + MOVD 960(R4), F30 + MOVD 928(R4), F29 + MOVD 896(R4), F28 + MOVD 864(R4), F27 + MOVD 832(R4), F26 + MOVD 800(R4), F25 + MOVD 768(R4), F24 + MOVD 736(R4), F23 + MOVD 704(R4), F22 + MOVD 672(R4), F21 + MOVD 640(R4), F20 + MOVD 608(R4), F19 + MOVD 576(R4), F18 + MOVD 544(R4), F17 + MOVD 512(R4), F16 + MOVD 480(R4), F15 + MOVD 448(R4), F14 + MOVD 416(R4), F13 + MOVD 384(R4), F12 + MOVD 352(R4), F11 + MOVD 320(R4), F10 + MOVD 288(R4), F9 + MOVD 256(R4), F8 + MOVD 224(R4), F7 + MOVD 192(R4), F6 + MOVD 160(R4), F5 + MOVD 128(R4), F4 + MOVD 96(R4), F3 + MOVD 64(R4), F2 + MOVD 32(R4), F1 + MOVD 0(R4), F0 + JMP restoreGPs +restoreLSX: + VMOVQ 992(R4), V31 + VMOVQ 960(R4), V30 + VMOVQ 928(R4), V29 + VMOVQ 896(R4), V28 + VMOVQ 864(R4), V27 + VMOVQ 832(R4), V26 + VMOVQ 800(R4), V25 + VMOVQ 768(R4), V24 + VMOVQ 736(R4), V23 + VMOVQ 704(R4), V22 + VMOVQ 672(R4), V21 + VMOVQ 640(R4), V20 + VMOVQ 608(R4), V19 + VMOVQ 576(R4), V18 + VMOVQ 544(R4), V17 + VMOVQ 512(R4), V16 + VMOVQ 480(R4), V15 + VMOVQ 448(R4), V14 + VMOVQ 416(R4), V13 + VMOVQ 384(R4), V12 + VMOVQ 352(R4), V11 + VMOVQ 320(R4), V10 + VMOVQ 288(R4), V9 + VMOVQ 256(R4), V8 + VMOVQ 224(R4), V7 + VMOVQ 192(R4), V6 + VMOVQ 160(R4), V5 + VMOVQ 128(R4), V4 + VMOVQ 96(R4), V3 + VMOVQ 64(R4), V2 + VMOVQ 32(R4), V1 + VMOVQ 0(R4), V0 + JMP restoreGPs +restoreLASX: + XVMOVQ 992(R4), X31 + XVMOVQ 960(R4), X30 + XVMOVQ 928(R4), X29 + XVMOVQ 896(R4), X28 + XVMOVQ 864(R4), X27 + XVMOVQ 832(R4), X26 + XVMOVQ 800(R4), X25 + XVMOVQ 768(R4), X24 + XVMOVQ 736(R4), X23 + XVMOVQ 704(R4), X22 + XVMOVQ 672(R4), X21 + XVMOVQ 640(R4), X20 + XVMOVQ 608(R4), X19 + XVMOVQ 576(R4), X18 + XVMOVQ 544(R4), X17 + XVMOVQ 512(R4), X16 + XVMOVQ 480(R4), X15 + XVMOVQ 448(R4), X14 + XVMOVQ 416(R4), X13 + XVMOVQ 384(R4), X12 + XVMOVQ 352(R4), X11 + XVMOVQ 320(R4), X10 + XVMOVQ 288(R4), X9 + XVMOVQ 256(R4), X8 + XVMOVQ 224(R4), X7 + XVMOVQ 192(R4), X6 + XVMOVQ 160(R4), X5 + XVMOVQ 128(R4), X4 + XVMOVQ 96(R4), X3 + XVMOVQ 64(R4), X2 + XVMOVQ 32(R4), X1 + XVMOVQ 0(R4), X0 + // Restore GPs +restoreGPs: + MOVV 216(R3), R5 BSTRPICKV $7, R5, $0, R4 MOVV R4, FCC0 BSTRPICKV $15, R5, $8, R4 @@ -99,38 +289,6 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVV R4, FCC6 BSTRPICKV $63, R5, $56, R4 MOVV R4, FCC7 - MOVD 464(R3), F31 - MOVD 456(R3), F30 - MOVD 448(R3), F29 - MOVD 440(R3), F28 - MOVD 432(R3), F27 - MOVD 424(R3), F26 - MOVD 
416(R3), F25 - MOVD 408(R3), F24 - MOVD 400(R3), F23 - MOVD 392(R3), F22 - MOVD 384(R3), F21 - MOVD 376(R3), F20 - MOVD 368(R3), F19 - MOVD 360(R3), F18 - MOVD 352(R3), F17 - MOVD 344(R3), F16 - MOVD 336(R3), F15 - MOVD 328(R3), F14 - MOVD 320(R3), F13 - MOVD 312(R3), F12 - MOVD 304(R3), F11 - MOVD 296(R3), F10 - MOVD 288(R3), F9 - MOVD 280(R3), F8 - MOVD 272(R3), F7 - MOVD 264(R3), F6 - MOVD 256(R3), F5 - MOVD 248(R3), F4 - MOVD 240(R3), F3 - MOVD 232(R3), F2 - MOVD 224(R3), F1 - MOVD 216(R3), F0 MOVV 208(R3), R31 MOVV 200(R3), R29 MOVV 192(R3), R28 @@ -157,7 +315,7 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVV 24(R3), R6 MOVV 16(R3), R5 MOVV 8(R3), R4 - MOVV 480(R3), R1 + MOVV 224(R3), R1 MOVV (R3), R30 - ADDV $488, R3 + ADDV $232, R3 JMP (R30) diff --git a/src/runtime/preempt_noxreg.go b/src/runtime/preempt_noxreg.go index dfe46559b5b..977bf0bcec7 100644 --- a/src/runtime/preempt_noxreg.go +++ b/src/runtime/preempt_noxreg.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !amd64 +//go:build !amd64 && !arm64 && !loong64 // This provides common support for architectures that DO NOT use extended // register state in asynchronous preemption. diff --git a/src/runtime/preempt_xreg.go b/src/runtime/preempt_xreg.go index 9e05455ddbb..cc52c5f3c4e 100644 --- a/src/runtime/preempt_xreg.go +++ b/src/runtime/preempt_xreg.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build amd64 +//go:build amd64 || arm64 || loong64 // This provides common support for architectures that use extended register // state in asynchronous preemption. diff --git a/src/runtime/print.go b/src/runtime/print.go index 0b05aedad3f..c01db9d7f98 100644 --- a/src/runtime/print.go +++ b/src/runtime/print.go @@ -6,6 +6,7 @@ package runtime import ( "internal/goarch" + "internal/strconv" "unsafe" ) @@ -118,101 +119,53 @@ func printbool(v bool) { } } -func printfloat(v float64) { - switch { - case v != v: - printstring("NaN") - return - case v+v == v && v > 0: - printstring("+Inf") - return - case v+v == v && v < 0: - printstring("-Inf") - return - } - - const n = 7 // digits printed - var buf [n + 7]byte - buf[0] = '+' - e := 0 // exp - if v == 0 { - if 1/v < 0 { - buf[0] = '-' - } - } else { - if v < 0 { - v = -v - buf[0] = '-' - } - - // normalize - for v >= 10 { - e++ - v /= 10 - } - for v < 1 { - e-- - v *= 10 - } - - // round - h := 5.0 - for i := 0; i < n; i++ { - h /= 10 - } - v += h - if v >= 10 { - e++ - v /= 10 - } - } - - // format +d.dddd+edd - for i := 0; i < n; i++ { - s := int(v) - buf[i+2] = byte(s + '0') - v -= float64(s) - v *= 10 - } - buf[1] = buf[2] - buf[2] = '.' 
- - buf[n+2] = 'e' - buf[n+3] = '+' - if e < 0 { - e = -e - buf[n+3] = '-' - } - - buf[n+4] = byte(e/100) + '0' - buf[n+5] = byte(e/10)%10 + '0' - buf[n+6] = byte(e%10) + '0' - gwrite(buf[:]) +func printfloat64(v float64) { + var buf [20]byte + gwrite(strconv.AppendFloat(buf[:0], v, 'g', -1, 64)) } -func printcomplex(c complex128) { - print("(", real(c), imag(c), "i)") +func printfloat32(v float32) { + var buf [20]byte + gwrite(strconv.AppendFloat(buf[:0], float64(v), 'g', -1, 32)) +} + +func printcomplex128(c complex128) { + var buf [44]byte + gwrite(strconv.AppendComplex(buf[:0], c, 'g', -1, 128)) +} + +func printcomplex64(c complex64) { + var buf [44]byte + gwrite(strconv.AppendComplex(buf[:0], complex128(c), 'g', -1, 64)) } func printuint(v uint64) { - var buf [100]byte - i := len(buf) - for i--; i > 0; i-- { - buf[i] = byte(v%10 + '0') - if v < 10 { - break - } - v /= 10 - } + // Note: Avoiding strconv.AppendUint so that it's clearer + // that there are no allocations in this routine. + // cmd/link/internal/ld.TestAbstractOriginSanity + // sees the append and doesn't realize it doesn't allocate. + var buf [20]byte + i := strconv.RuntimeFormatBase10(buf[:], v) gwrite(buf[i:]) } func printint(v int64) { - if v < 0 { - printstring("-") - v = -v + // Note: Avoiding strconv.AppendUint so that it's clearer + // that there are no allocations in this routine. + // cmd/link/internal/ld.TestAbstractOriginSanity + // sees the append and doesn't realize it doesn't allocate. + neg := v < 0 + u := uint64(v) + if neg { + u = -u } - printuint(uint64(v)) + var buf [20]byte + i := strconv.RuntimeFormatBase10(buf[:], u) + if neg { + i-- + buf[i] = '-' + } + gwrite(buf[i:]) } var minhexdigits = 0 // protected by printlock diff --git a/src/runtime/proc.go b/src/runtime/proc.go index 38299d82c02..21b276cabf8 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -11,8 +11,8 @@ import ( "internal/goos" "internal/runtime/atomic" "internal/runtime/exithook" - "internal/runtime/strconv" "internal/runtime/sys" + "internal/strconv" "internal/stringslite" "unsafe" ) @@ -288,13 +288,6 @@ func main() { fn := main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime fn() - exitHooksRun := false - if raceenabled { - runExitHooks(0) // run hooks now, since racefini does not return - exitHooksRun = true - racefini() - } - // Check for C memory leaks if using ASAN and we've made cgo calls, // or if we are running as a library in a C program. // We always make one cgo call, above, to notify_runtime_init_done, @@ -302,6 +295,7 @@ func main() { // No point in leak checking if no cgo calls, since leak checking // just looks for objects allocated using malloc and friends. // Just checking iscgo doesn't help because asan implies iscgo. + exitHooksRun := false if asanenabled && (isarchive || islibrary || NumCgoCall() > 1) { runExitHooks(0) // lsandoleakcheck may not return exitHooksRun = true @@ -327,6 +321,9 @@ func main() { if !exitHooksRun { runExitHooks(0) } + if raceenabled { + racefini() // does not return + } exit(0) for { @@ -853,6 +850,8 @@ func schedinit() { lockVerifyMSize() + sched.midle.init(unsafe.Offsetof(m{}.idleNode)) + // raceinit must be the first call to race detector. // In particular, it must be done before mallocinit below calls racemapshadow. 
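The printint rewrite above formats the magnitude as unsigned and then prepends the sign, filling a fixed buffer from the right. internal/strconv.RuntimeFormatBase10 is not importable outside the runtime, so this sketch inlines equivalent right-to-left digit logic under that assumption:

package main

import "fmt"

// formatBase10 fills buf from the right with the decimal digits of v
// and returns the index of the first digit.
func formatBase10(buf []byte, v uint64) int {
	i := len(buf)
	for {
		i--
		buf[i] = byte(v%10) + '0'
		if v < 10 {
			break
		}
		v /= 10
	}
	return i
}

// formatInt mirrors printint: format the magnitude as unsigned, then
// prepend the sign. Negating the uint64 yields the correct magnitude
// even for math.MinInt64, which has no positive int64 counterpart.
func formatInt(v int64) string {
	neg := v < 0
	u := uint64(v)
	if neg {
		u = -u
	}
	var buf [20]byte
	i := formatBase10(buf[:], u)
	if neg {
		i--
		buf[i] = '-'
	}
	return string(buf[i:])
}

func main() {
	fmt.Println(formatInt(-9370))                // -9370
	fmt.Println(formatInt(-9223372036854775808)) // math.MinInt64 round-trips
}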
gp := getg() @@ -919,8 +918,8 @@ func schedinit() { lock(&sched.lock) sched.lastpoll.Store(nanotime()) var procs int32 - if n, ok := strconv.Atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 { - procs = n + if n, err := strconv.ParseInt(gogetenv("GOMAXPROCS"), 10, 32); err == nil && n > 0 { + procs = int32(n) sched.customGOMAXPROCS = true } else { // Use numCPUStartup for initial GOMAXPROCS for two reasons: @@ -1011,6 +1010,8 @@ func mcommoninit(mp *m, id int64) { mp.id = mReserveID() } + mp.self = newMWeakPointer(mp) + mrandinit(mp) mpreinit(mp) @@ -1224,7 +1225,8 @@ func casfrom_Gscanstatus(gp *g, oldval, newval uint32) { _Gscanrunning, _Gscansyscall, _Gscanleaked, - _Gscanpreempted: + _Gscanpreempted, + _Gscandeadextra: if newval == oldval&^_Gscan { success = gp.atomicstatus.CompareAndSwap(oldval, newval) } @@ -1245,7 +1247,8 @@ func castogscanstatus(gp *g, oldval, newval uint32) bool { _Grunning, _Gwaiting, _Gleaked, - _Gsyscall: + _Gsyscall, + _Gdeadextra: if newval == oldval|_Gscan { r := gp.atomicstatus.CompareAndSwap(oldval, newval) if r { @@ -1256,8 +1259,8 @@ func castogscanstatus(gp *g, oldval, newval uint32) bool { } } print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n") - throw("castogscanstatus") - panic("not reached") + throw("bad oldval passed to castogscanstatus") + return false } // casgstatusAlwaysTrack is a debug flag that causes casgstatus to always track @@ -1315,8 +1318,9 @@ func casgstatus(gp *g, oldval, newval uint32) { }) } - if oldval == _Grunning { - // Track every gTrackingPeriod time a goroutine transitions out of running. + if (oldval == _Grunning || oldval == _Gsyscall) && (newval != _Grunning && newval != _Gsyscall) { + // Track every gTrackingPeriod time a goroutine transitions out of _Grunning or _Gsyscall. + // Do not track _Grunning <-> _Gsyscall transitions, since they're two very similar states. if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 { gp.tracking = true } @@ -1653,29 +1657,21 @@ func stopTheWorldWithSema(reason stwReason) worldStop { sched.stopwait = gomaxprocs sched.gcwaiting.Store(true) preemptall() - // stop current P + + // Stop current P. gp.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic. gp.m.p.ptr().gcStopTime = start sched.stopwait-- - // try to retake all P's in Psyscall status - trace = traceAcquire() + + // Try to retake all P's in syscalls. for _, pp := range allp { - s := pp.status - if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) { - if trace.ok() { - trace.ProcSteal(pp, false) - } - sched.nGsyscallNoP.Add(1) - pp.syscalltick++ - pp.gcStopTime = nanotime() - sched.stopwait-- + if thread, ok := setBlockOnExitSyscall(pp); ok { + thread.gcstopP() + thread.resume() } } - if trace.ok() { - traceRelease(trace) - } - // stop idle P's + // Stop idle Ps. now := nanotime() for { pp, _ := pidleget(now) @@ -1689,7 +1685,7 @@ func stopTheWorldWithSema(reason stwReason) worldStop { wait := sched.stopwait > 0 unlock(&sched.lock) - // wait for remaining P's to stop voluntarily + // Wait for remaining Ps to stop voluntarily. if wait { for { // wait for 100us, then try to re-preempt in case of any races @@ -2025,6 +2021,10 @@ func mexit(osStack bool) { // Free vgetrandom state. vgetrandomDestroy(mp) + // Clear the self pointer so Ps don't access this M after it is freed, + // or keep it alive. + mp.self.clear() + // Remove m from allm. 
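The schedinit change above swaps the internal Atoi32 helper for strconv.ParseInt with an explicit 32-bit size, so out-of-range values fail to parse instead of silently truncating. A rough standard-library equivalent of that parse (reading the environment directly, unlike the runtime's gogetenv):

package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	// ParseInt with bitSize 32 rejects values outside int32's range,
	// so the int32 conversion below cannot truncate.
	var procs int32
	if n, err := strconv.ParseInt(os.Getenv("GOMAXPROCS"), 10, 32); err == nil && n > 0 {
		procs = int32(n)
	}
	if procs > 0 {
		fmt.Println("custom GOMAXPROCS:", procs)
	} else {
		fmt.Println("GOMAXPROCS unset or invalid; a default would be used")
	}
}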
lock(&sched.lock) for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink { @@ -2155,9 +2155,9 @@ func forEachPInternal(fn func(*p)) { } preemptall() - // Any P entering _Pidle or _Psyscall from now on will observe + // Any P entering _Pidle or a system call from now on will observe // p.runSafePointFn == 1 and will call runSafePointFn when - // changing its status to _Pidle/_Psyscall. + // changing its status to _Pidle. // Run safe point function for all idle Ps. sched.pidle will // not change because we hold sched.lock. @@ -2174,25 +2174,17 @@ func forEachPInternal(fn func(*p)) { // Run fn for the current P. fn(pp) - // Force Ps currently in _Psyscall into _Pidle and hand them + // Force Ps currently in a system call into _Pidle and hand them // off to induce safe point function execution. for _, p2 := range allp { - s := p2.status - - // We need to be fine-grained about tracing here, since handoffp - // might call into the tracer, and the tracer is non-reentrant. - trace := traceAcquire() - if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) { - if trace.ok() { - // It's important that we traceRelease before we call handoffp, which may also traceAcquire. - trace.ProcSteal(p2, false) - traceRelease(trace) - } - sched.nGsyscallNoP.Add(1) - p2.syscalltick++ + if atomic.Load(&p2.runSafePointFn) != 1 { + // Already ran it. + continue + } + if thread, ok := setBlockOnExitSyscall(p2); ok { + thread.takeP() + thread.resume() handoffp(p2) - } else if trace.ok() { - traceRelease(trace) } } @@ -2233,9 +2225,9 @@ func forEachPInternal(fn func(*p)) { // } // // runSafePointFn must be checked on any transition in to _Pidle or -// _Psyscall to avoid a race where forEachP sees that the P is running -// just before the P goes into _Pidle/_Psyscall and neither forEachP -// nor the P run the safe-point function. +// when entering a system call to avoid a race where forEachP sees +// that the P is running just before the P goes into _Pidle/system call +// and neither forEachP nor the P run the safe-point function. func runSafePointFn() { p := getg().m.p.ptr() // Resolve the race between forEachP running the safe-point @@ -2460,7 +2452,7 @@ func needm(signal bool) { } // mp.curg is now a real goroutine. - casgstatus(mp.curg, _Gdead, _Gsyscall) + casgstatus(mp.curg, _Gdeadextra, _Gsyscall) sched.ngsys.Add(-1) sched.nGsyscallNoP.Add(1) @@ -2516,11 +2508,10 @@ func oneNewExtraM() { gp.syscallpc = gp.sched.pc gp.syscallsp = gp.sched.sp gp.stktopsp = gp.sched.sp - // malg returns status as _Gidle. Change to _Gdead before - // adding to allg where GC can see it. We use _Gdead to hide - // this from tracebacks and stack scans since it isn't a - // "real" goroutine until needm grabs it. - casgstatus(gp, _Gidle, _Gdead) + // malg returns status as _Gidle. Change to _Gdeadextra before + // adding to allg where GC can see it. _Gdeadextra hides this + // from traceback and stack scans. + casgstatus(gp, _Gidle, _Gdeadextra) gp.m = mp mp.curg = gp mp.isextra = true @@ -2571,7 +2562,7 @@ func oneNewExtraM() { // So that the destructor would invoke dropm while the non-Go thread is exiting. // This is much faster since it avoids expensive signal-related syscalls. // -// This always runs without a P, so //go:nowritebarrierrec is required. +// This may run without a P, so //go:nowritebarrierrec is required. 
// // This may run with a different stack than was recorded in g0 (there is no // call to callbackUpdateSystemStack prior to dropm), so this must be @@ -2594,8 +2585,8 @@ func dropm() { trace = traceAcquire() } - // Return mp.curg to dead state. - casgstatus(mp.curg, _Gsyscall, _Gdead) + // Return mp.curg to _Gdeadextra state. + casgstatus(mp.curg, _Gsyscall, _Gdeadextra) mp.curg.preemptStop = false sched.ngsys.Add(1) sched.nGsyscallNoP.Add(-1) @@ -4275,6 +4266,7 @@ func park_m(gp *g) { } func goschedImpl(gp *g, preempted bool) { + pp := gp.m.p.ptr() trace := traceAcquire() status := readgstatus(gp) if status&^_Gscan != _Grunning { @@ -4297,9 +4289,15 @@ func goschedImpl(gp *g, preempted bool) { } dropg() - lock(&sched.lock) - globrunqput(gp) - unlock(&sched.lock) + if preempted && sched.gcwaiting.Load() { + // If preempted for STW, keep the G on the local P in runnext + // so it can keep running immediately after the STW. + runqput(pp, gp, true) + } else { + lock(&sched.lock) + globrunqput(gp) + unlock(&sched.lock) + } if mainStarted { wakep() @@ -4358,21 +4356,26 @@ func preemptPark(gp *g) { casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted) dropg() - // Be careful about how we trace this next event. The ordering - // is subtle. + // Be careful about ownership as we trace this next event. // - // The moment we CAS into _Gpreempted, suspendG could CAS to - // _Gwaiting, do its work, and ready the goroutine. All of + // According to the tracer invariants (trace.go) it's unsafe + // for us to emit an event for a goroutine we do not own. + // The moment we CAS into _Gpreempted, suspendG could CAS the + // goroutine to _Gwaiting, effectively taking ownership. All of // this could happen before we even get the chance to emit // an event. The end result is that the events could appear // out of order, and the tracer generally assumes the scheduler // takes care of the ordering between GoPark and GoUnpark. // // The answer here is simple: emit the event while we still hold - // the _Gscan bit on the goroutine. We still need to traceAcquire - // and traceRelease across the CAS because the tracer could be - // what's calling suspendG in the first place, and we want the - // CAS and event emission to appear atomic to the tracer. + // the _Gscan bit on the goroutine, since the _Gscan bit means + // ownership over transitions. + // + // We still need to traceAcquire and traceRelease across the CAS + // because the tracer could be what's calling suspendG in the first + // place. This also upholds the tracer invariant that we must hold + // traceAcquire/traceRelease across the transition. However, we + // specifically *only* emit the event while we still have ownership. trace := traceAcquire() if trace.ok() { trace.GoPark(traceBlockPreempted, 0) @@ -4566,7 +4569,6 @@ func save(pc, sp, bp uintptr) { // //go:nosplit func reentersyscall(pc, sp, bp uintptr) { - trace := traceAcquire() gp := getg() // Disable preemption because during this function g is in Gsyscall status, @@ -4580,17 +4582,23 @@ func reentersyscall(pc, sp, bp uintptr) { gp.stackguard0 = stackPreempt gp.throwsplit = true + // Copy the syscalltick over so we can identify if the P got stolen later. + gp.m.syscalltick = gp.m.p.ptr().syscalltick + + pp := gp.m.p.ptr() + if pp.runSafePointFn != 0 { + // runSafePointFn may stack split if run on this stack + systemstack(runSafePointFn) + } + gp.m.oldp.set(pp) + // Leave SP around for GC and traceback. 
save(pc, sp, bp) gp.syscallsp = sp gp.syscallpc = pc gp.syscallbp = bp - casgstatus(gp, _Grunning, _Gsyscall) - if staticLockRanking { - // When doing static lock ranking casgstatus can call - // systemstack which clobbers g.sched. - save(pc, sp, bp) - } + + // Double-check sp and bp. if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp { systemstack(func() { print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") @@ -4603,40 +4611,51 @@ func reentersyscall(pc, sp, bp uintptr) { throw("entersyscall") }) } - + trace := traceAcquire() if trace.ok() { + // Emit a trace event. Notably, actually emitting the event must happen before + // the casgstatus because it mutates the P, but the traceLocker must be held + // across the casgstatus since we're transitioning out of _Grunning + // (see trace.go invariants). systemstack(func() { trace.GoSysCall() - traceRelease(trace) }) - // systemstack itself clobbers g.sched.{pc,sp} and we might - // need them later when the G is genuinely blocked in a - // syscall + // systemstack clobbered gp.sched, so restore it. save(pc, sp, bp) } - - if sched.sysmonwait.Load() { - systemstack(entersyscall_sysmon) - save(pc, sp, bp) - } - - if gp.m.p.ptr().runSafePointFn != 0 { - // runSafePointFn may stack split if run on this stack - systemstack(runSafePointFn) - save(pc, sp, bp) - } - - gp.m.syscalltick = gp.m.p.ptr().syscalltick - pp := gp.m.p.ptr() - pp.m = 0 - gp.m.oldp.set(pp) - gp.m.p = 0 - atomic.Store(&pp.status, _Psyscall) if sched.gcwaiting.Load() { - systemstack(entersyscall_gcwait) + // Optimization: If there's a pending STW, do the equivalent of + // entersyscallblock here at the last minute and immediately give + // away our P. + systemstack(func() { + entersyscallHandleGCWait(trace) + }) + // systemstack clobbered gp.sched, so restore it. + save(pc, sp, bp) + } + // As soon as we switch to _Gsyscall, we are in danger of losing our P. + // We must not touch it after this point. + // + // Try to do a quick CAS to avoid calling into casgstatus in the common case. + // If we have a bubble, we need to fall into casgstatus. + if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Grunning, _Gsyscall) { + casgstatus(gp, _Grunning, _Gsyscall) + } + if staticLockRanking { + // casgstatus clobbers gp.sched via systemstack under staticLockRanking. Restore it. + save(pc, sp, bp) + } + if trace.ok() { + // N.B. We don't need to go on the systemstack because traceRelease is very + // carefully recursively nosplit. This also means we don't need to worry + // about clobbering gp.sched. + traceRelease(trace) + } + if sched.sysmonwait.Load() { + systemstack(entersyscallWakeSysmon) + // systemstack clobbered gp.sched, so restore it. save(pc, sp, bp) } - gp.m.locks-- } @@ -4663,7 +4682,7 @@ func entersyscall() { reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp) } -func entersyscall_sysmon() { +func entersyscallWakeSysmon() { lock(&sched.lock) if sched.sysmonwait.Load() { sched.sysmonwait.Store(false) @@ -4672,25 +4691,19 @@ func entersyscall_sysmon() { unlock(&sched.lock) } -func entersyscall_gcwait() { +func entersyscallHandleGCWait(trace traceLocker) { gp := getg() - pp := gp.m.oldp.ptr() lock(&sched.lock) - trace := traceAcquire() - if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) { + if sched.stopwait > 0 { + // Set our P to _Pgcstop so the STW can take it. 
+ pp := gp.m.p.ptr() + pp.m = 0 + gp.m.p = 0 + atomic.Store(&pp.status, _Pgcstop) + if trace.ok() { - // This is a steal in the new tracer. While it's very likely - // that we were the ones to put this P into _Psyscall, between - // then and now it's totally possible it had been stolen and - // then put back into _Psyscall for us to acquire here. In such - // case ProcStop would be incorrect. - // - // TODO(mknyszek): Consider emitting a ProcStop instead when - // gp.m.syscalltick == pp.syscalltick, since then we know we never - // lost the P. - trace.ProcSteal(pp, true) - traceRelease(trace) + trace.ProcStop(pp) } sched.nGsyscallNoP.Add(1) pp.gcStopTime = nanotime() @@ -4698,8 +4711,6 @@ func entersyscall_gcwait() { if sched.stopwait--; sched.stopwait == 0 { notewakeup(&sched.stopnote) } - } else if trace.ok() { - traceRelease(trace) } unlock(&sched.lock) } @@ -4744,6 +4755,22 @@ func entersyscallblock() { throw("entersyscallblock") }) } + + // Once we switch to _Gsyscall, we can't safely touch + // our P anymore, so we need to hand it off beforehand. + // The tracer also needs to see the syscall before the P + // handoff, so the order here must be (1) trace, + // (2) handoff, (3) _Gsyscall switch. + trace := traceAcquire() + systemstack(func() { + if trace.ok() { + trace.GoSysCall() + } + handoffp(releasep()) + }) + // <-- + // Caution: we're in a small window where we are in _Grunning without a P. + // --> casgstatus(gp, _Grunning, _Gsyscall) if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp { systemstack(func() { @@ -4757,8 +4784,11 @@ func entersyscallblock() { throw("entersyscallblock") }) } - - systemstack(entersyscallblock_handoff) + if trace.ok() { + systemstack(func() { + traceRelease(trace) + }) + } // Resave for traceback during blocked call. save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp()) @@ -4766,15 +4796,6 @@ func entersyscallblock() { gp.m.locks-- } -func entersyscallblock_handoff() { - trace := traceAcquire() - if trace.ok() { - trace.GoSysCall() - traceRelease(trace) - } - handoffp(releasep()) -} - // The goroutine g exited its system call. // Arrange for it to run on a cpu again. // This is called only from the go syscall library, not @@ -4802,13 +4823,92 @@ func exitsyscall() { if sys.GetCallerSP() > gp.syscallsp { throw("exitsyscall: syscall frame is no longer valid") } - gp.waitsince = 0 + + if sched.stopwait == freezeStopWait { + // Wedge ourselves if there's an outstanding freezetheworld. + // If we transition to running, we might end up with our traceback + // being taken twice. + systemstack(func() { + lock(&deadlock) + lock(&deadlock) + }) + } + + // Optimistically assume we're going to keep running, and switch to running. + // Before this point, our P wiring is not ours. Once we get past this point, + // we can access our P if we have it, otherwise we lost it. + // + // N.B. Because we're transitioning to _Grunning here, traceAcquire doesn't + // need to be held ahead of time. We're effectively atomic with respect to + // the tracer because we're non-preemptible and in the runtime. It can't stop + // us to read a bad status. + // + // Try to do a quick CAS to avoid calling into casgstatus in the common case. + // If we have a bubble, we need to fall into casgstatus. + if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Gsyscall, _Grunning) { + casgstatus(gp, _Gsyscall, _Grunning) + } + + // Caution: we're in a window where we may be in _Grunning without a P. 
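Both the entersyscall and exitsyscall paths above now attempt a raw CompareAndSwap on the status word and only fall into the heavyweight casgstatus when that fails. The fast-path/slow-path shape, abstracted into a toy (the state values and the slow path are placeholders, not the runtime's):

package main

import (
	"fmt"
	"sync/atomic"
)

// Illustrative status values; the runtime's real constants differ.
const (
	stateRunning uint32 = 2
	stateSyscall uint32 = 3
)

type gStub struct{ status atomic.Uint32 }

// slowTransition stands in for casgstatus: the loop that handles
// contended or scan-tagged statuses.
func slowTransition(gp *gStub, old, next uint32) {
	for !gp.status.CompareAndSwap(old, next) {
		// The real slow path yields and re-validates invariants here.
	}
}

// exitSyscallStatus does the cheap CAS first and only falls into the
// heavyweight path when the fast path fails.
func exitSyscallStatus(gp *gStub) {
	if !gp.status.CompareAndSwap(stateSyscall, stateRunning) {
		slowTransition(gp, stateSyscall, stateRunning)
	}
}

func main() {
	var gp gStub
	gp.status.Store(stateSyscall)
	exitSyscallStatus(&gp)
	fmt.Println(gp.status.Load() == stateRunning) // true
}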
+ // Either we will grab a P or call exitsyscall0, where we'll switch to + // _Grunnable. + + // Grab and clear our old P. oldp := gp.m.oldp.ptr() - gp.m.oldp = 0 - if exitsyscallfast(oldp) { - // When exitsyscallfast returns success, we have a P so can now use - // write barriers + gp.m.oldp.set(nil) + + // Check if we still have a P, and if not, try to acquire an idle P. + pp := gp.m.p.ptr() + if pp != nil { + // Fast path: we still have our P. Just emit a syscall exit event. + if trace := traceAcquire(); trace.ok() { + systemstack(func() { + // The truth is we truly never lost the P, but syscalltick + // is used to indicate whether the P should be treated as + // lost anyway. For example, when syscalltick is trashed by + // dropm. + // + // TODO(mknyszek): Consider a more explicit mechanism for this. + // Then syscalltick doesn't need to be trashed, and can be used + // exclusively by sysmon for deciding when it's time to retake. + if pp.syscalltick == gp.m.syscalltick { + trace.GoSysExit(false) + } else { + // Since we need to pretend we lost the P, but nobody ever + // took it, we need a ProcSteal event to model the loss. + // Then, continue with everything else we'd do if we lost + // the P. + trace.ProcSteal(pp) + trace.ProcStart() + trace.GoSysExit(true) + trace.GoStart() + } + traceRelease(trace) + }) + } + } else { + // Slow path: we lost our P. Try to get another one. + systemstack(func() { + // Try to get some other P. + if pp := exitsyscallTryGetP(oldp); pp != nil { + // Install the P. + acquirepNoTrace(pp) + + // We're going to start running again, so emit all the relevant events. + if trace := traceAcquire(); trace.ok() { + trace.ProcStart() + trace.GoSysExit(true) + trace.GoStart() + traceRelease(trace) + } + } + }) + pp = gp.m.p.ptr() + } + + // If we have a P, clean up and exit. + if pp != nil { if goroutineProfile.active { // Make sure that gp has had its stack written out to the goroutine // profile, exactly as it was when the goroutine profiler first @@ -4817,41 +4917,19 @@ func exitsyscall() { tryRecordGoroutineProfileWB(gp) }) } - trace := traceAcquire() - if trace.ok() { - lostP := oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick - systemstack(func() { - // Write out syscall exit eagerly. - // - // It's important that we write this *after* we know whether we - // lost our P or not (determined by exitsyscallfast). - trace.GoSysExit(lostP) - if lostP { - // We lost the P at some point, even though we got it back here. - // Trace that we're starting again, because there was a tracev2.GoSysBlock - // call somewhere in exitsyscallfast (indicating that this goroutine - // had blocked) and we're about to start running again. - trace.GoStart() - } - }) - } - // There's a cpu for us, so we can run. - gp.m.p.ptr().syscalltick++ - // We need to cas the status and scan before resuming... - casgstatus(gp, _Gsyscall, _Grunning) - if trace.ok() { - traceRelease(trace) - } + + // Increment the syscalltick for P, since we're exiting a syscall. + pp.syscalltick++ // Garbage collector isn't running (since we are), // so okay to clear syscallsp. gp.syscallsp = 0 gp.m.locks-- if gp.preempt { - // restore the preemption request in case we've cleared it in newstack + // Restore the preemption request in case we cleared it in newstack. gp.stackguard0 = stackPreempt } else { - // otherwise restore the real stackGuard, we've spoiled it in entersyscall/entersyscallblock + // Otherwise restore the real stackGuard, we clobbered it in entersyscall/entersyscallblock. 
gp.stackguard0 = gp.stack.lo + stackGuard } gp.throwsplit = false @@ -4860,14 +4938,13 @@ func exitsyscall() { // Scheduling of this goroutine is disabled. Gosched() } - return } - + // Slowest path: We couldn't get a P, so call into the scheduler. gp.m.locks-- // Call the scheduler. - mcall(exitsyscall0) + mcall(exitsyscallNoP) // Scheduler returned, so we're allowed to run now. // Delete the syscallsp information that we left for @@ -4880,78 +4957,38 @@ func exitsyscall() { gp.throwsplit = false } -//go:nosplit -func exitsyscallfast(oldp *p) bool { - // Freezetheworld sets stopwait but does not retake P's. - if sched.stopwait == freezeStopWait { - return false - } - - // Try to re-acquire the last P. - trace := traceAcquire() - if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) { - // There's a cpu for us, so we can run. - wirep(oldp) - exitsyscallfast_reacquired(trace) - if trace.ok() { - traceRelease(trace) - } - return true - } - if trace.ok() { - traceRelease(trace) - } - - // Try to get any other idle P. - if sched.pidle != 0 { - var ok bool - systemstack(func() { - ok = exitsyscallfast_pidle() - }) - if ok { - return true - } - } - return false -} - -// exitsyscallfast_reacquired is the exitsyscall path on which this G -// has successfully reacquired the P it was running on before the -// syscall. +// exitsyscallTryGetP is exitsyscall's attempt to get any P, if it's missing one. +// Returns the P on success, or nil if none was available. // -//go:nosplit -func exitsyscallfast_reacquired(trace traceLocker) { - gp := getg() - if gp.m.syscalltick != gp.m.p.ptr().syscalltick { - if trace.ok() { - // The p was retaken and then enter into syscall again (since gp.m.syscalltick has changed). - // tracev2.GoSysBlock for this syscall was already emitted, - // but here we effectively retake the p from the new syscall running on the same p. - systemstack(func() { - // We're stealing the P. It's treated - // as if it temporarily stopped running. Then, start running. - trace.ProcSteal(gp.m.p.ptr(), true) - trace.ProcStart() - }) +// Must execute on the systemstack because exitsyscall is nosplit. +// +//go:systemstack +func exitsyscallTryGetP(oldp *p) *p { + // Try to steal our old P back. + if oldp != nil { + if thread, ok := setBlockOnExitSyscall(oldp); ok { + thread.takeP() + thread.resume() + sched.nGsyscallNoP.Add(-1) // takeP adds 1. + return oldp } - gp.m.p.ptr().syscalltick++ } -} -func exitsyscallfast_pidle() bool { - lock(&sched.lock) - pp, _ := pidleget(0) - if pp != nil && sched.sysmonwait.Load() { - sched.sysmonwait.Store(false) - notewakeup(&sched.sysmonnote) + // Try to get an idle P. + if sched.pidle != 0 { + lock(&sched.lock) + pp, _ := pidleget(0) + if pp != nil && sched.sysmonwait.Load() { + sched.sysmonwait.Store(false) + notewakeup(&sched.sysmonnote) + } + unlock(&sched.lock) + if pp != nil { + sched.nGsyscallNoP.Add(-1) + return pp + } } - unlock(&sched.lock) - if pp != nil { - sched.nGsyscallNoP.Add(-1) - acquirep(pp) - return true - } - return false + return nil } // exitsyscall slow path on g0. @@ -4960,11 +4997,10 @@ func exitsyscallfast_pidle() bool { // Called via mcall, so gp is the calling g from this M. // //go:nowritebarrierrec -func exitsyscall0(gp *g) { - var trace traceLocker +func exitsyscallNoP(gp *g) { traceExitingSyscall() - trace = traceAcquire() - casgstatus(gp, _Gsyscall, _Grunnable) + trace := traceAcquire() + casgstatus(gp, _Grunning, _Grunnable) traceExitedSyscall() if trace.ok() { // Write out syscall exit eagerly.
@@ -5241,7 +5277,8 @@ func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreaso
 }
 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
 
- // Get a goid and switch to runnable. Make all this atomic to the tracer.
+ // Get a goid and switch to runnable. This needs to happen under traceAcquire
+ // since it's a goroutine transition. See tracer invariants in trace.go.
 trace := traceAcquire()
 var status uint32 = _Grunnable
 if parked {
@@ -5819,11 +5856,15 @@ func (pp *p) destroy() {
 // Move all timers to the local P.
 getg().m.p.ptr().timers.take(&pp.timers)
 
- // Flush p's write barrier buffer.
- if gcphase != _GCoff {
- wbBufFlush1(pp)
- pp.gcw.dispose()
+ // No need to flush p's write barrier buffer or span queue, as Ps
+ // cannot be destroyed during the mark phase.
+ if phase := gcphase; phase != _GCoff {
+ println("runtime: p id", pp.id, "destroyed during GC phase", phase)
+ throw("P destroyed while GC is running")
 }
+ // We should free the queues though.
+ pp.gcw.spanq.destroy()
+
 clear(pp.sudogbuf[:])
 pp.sudogcache = pp.sudogbuf[:0]
 pp.pinnerCache = nil
@@ -5986,6 +6027,7 @@ func procresize(nprocs int32) *p {
 }
 
 var runnablePs *p
+ var runnablePsNeedM *p
 for i := nprocs - 1; i >= 0; i-- {
 pp := allp[i]
 if gp.m.p.ptr() == pp {
@@ -5994,12 +6036,41 @@ func procresize(nprocs int32) *p {
 pp.status = _Pidle
 if runqempty(pp) {
 pidleput(pp, now)
- } else {
- pp.m.set(mget())
- pp.link.set(runnablePs)
- runnablePs = pp
+ continue
 }
+
+ // Prefer to run on the most recent M if it is
+ // available.
+ //
+ // For Ps with no oldm (or whose oldm is already taken
+ // by an earlier P), we delay M assignment until all
+ // Ps with an oldm are handled. Otherwise, mget may
+ // return an M that a later P has in oldm.
+ var mp *m
+ if oldm := pp.oldm.get(); oldm != nil {
+ // Returns nil if oldm is not idle.
+ mp = mgetSpecific(oldm)
+ }
+ if mp == nil {
+ // Call mget later.
+ pp.link.set(runnablePsNeedM)
+ runnablePsNeedM = pp
+ continue
+ }
+ pp.m.set(mp)
+ pp.link.set(runnablePs)
+ runnablePs = pp
 }
+ for runnablePsNeedM != nil {
+ pp := runnablePsNeedM
+ runnablePsNeedM = pp.link.ptr()
+
+ mp := mget()
+ pp.m.set(mp)
+ pp.link.set(runnablePs)
+ runnablePs = pp
+ }
+
 stealOrder.reset(uint32(nprocs))
 var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
@@ -6017,15 +6088,10 @@ func procresize(nprocs int32) *p {
//
//go:yeswritebarrierrec
func acquirep(pp *p) {
- // Do the part that isn't allowed to have write barriers.
- wirep(pp)
-
- // Have p; write barriers now allowed.
-
- // Perform deferred mcache flush before this P can allocate
- // from a potentially stale mcache.
- pp.mcache.prepareForSweep()
+ // Do the work.
+ acquirepNoTrace(pp)
 
+ // Emit the event.
 trace := traceAcquire()
 if trace.ok() {
 trace.ProcStart()
@@ -6033,6 +6099,25 @@ func acquirep(pp *p) {
 }
}
 
+// Internals of acquirep, just skipping the trace events.
+//
+//go:yeswritebarrierrec
+func acquirepNoTrace(pp *p) {
+ // Do the part that isn't allowed to have write barriers.
+ wirep(pp)
+
+ // Have p; write barriers now allowed.
+
+ // The M we're associating with will be the old M after the next
+ // releasep. We must set this here because write barriers are not
+ // allowed in releasep.
+ pp.oldm = pp.m.ptr().self
+
+ // Perform deferred mcache flush before this P can allocate
+ // from a potentially stale mcache.
+ pp.mcache.prepareForSweep()
+}
+
 // wirep is the first step of acquirep, which actually associates the
 // current M to pp. This is broken out so we can disallow write
 // barriers for this part, since we don't yet have a P.
@@ -6378,73 +6463,205 @@ func retake(now int64) uint32 {
 // temporarily drop the allpLock. Hence, we need to re-fetch
 // allp each time around the loop.
 for i := 0; i < len(allp); i++ {
+ // Quickly filter out non-running Ps. Running Ps are either
+ // in a syscall or are actually executing. Idle Ps don't
+ // need to be retaken.
+ //
+ // This is best-effort, so it's OK that it's racy. Our target
+ // is to retake Ps that have been running or in a syscall for
+ // a long time (milliseconds), so the state has plenty of time
+ // to stabilize.
 pp := allp[i]
- if pp == nil {
- // This can happen if procresize has grown
+ if pp == nil || atomic.Load(&pp.status) != _Prunning {
+ // pp can be nil if procresize has grown
 // allp but not yet created new Ps.
 continue
 }
 pd := &pp.sysmontick
- s := pp.status
 sysretake := false
- if s == _Prunning || s == _Psyscall {
- // Preempt G if it's running on the same schedtick for
- // too long. This could be from a single long-running
- // goroutine or a sequence of goroutines run via
- // runnext, which share a single schedtick time slice.
- t := int64(pp.schedtick)
- if int64(pd.schedtick) != t {
- pd.schedtick = uint32(t)
- pd.schedwhen = now
- } else if pd.schedwhen+forcePreemptNS <= now {
- preemptone(pp)
- // In case of syscall, preemptone() doesn't
- // work, because there is no M wired to P.
- sysretake = true
- }
+
+ // Preempt G if it's running on the same schedtick for
+ // too long. This could be from a single long-running
+ // goroutine or a sequence of goroutines run via
+ // runnext, which share a single schedtick time slice.
+ schedt := int64(pp.schedtick)
+ if int64(pd.schedtick) != schedt {
+ pd.schedtick = uint32(schedt)
+ pd.schedwhen = now
+ } else if pd.schedwhen+forcePreemptNS <= now {
+ preemptone(pp)
+ // If pp is in a syscall, preemptone doesn't work.
+ // Neither the goroutine nor the thread can respond to a
+ // preemption request because they're not in Go code,
+ // so we need to take the P ourselves.
+ sysretake = true
 }
- if s == _Psyscall {
- // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
- t := int64(pp.syscalltick)
- if !sysretake && int64(pd.syscalltick) != t {
- pd.syscalltick = uint32(t)
- pd.syscallwhen = now
- continue
- }
- // On the one hand we don't want to retake Ps if there is no other work to do,
- // but on the other hand we want to retake them eventually
- // because they can prevent the sysmon thread from deep sleep.
- if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
- continue
- }
- // Drop allpLock so we can take sched.lock.
- unlock(&allpLock)
- // Need to decrement number of idle locked M's
- // (pretending that one more is running) before the CAS.
- // Otherwise the M from which we retake can exit the syscall,
- // increment nmidle and report deadlock.
- incidlelocked(-1)
- trace := traceAcquire()
- if atomic.Cas(&pp.status, s, _Pidle) {
- if trace.ok() {
- trace.ProcSteal(pp, false)
- traceRelease(trace)
- }
- sched.nGsyscallNoP.Add(1)
- n++
- pp.syscalltick++
- handoffp(pp)
- } else if trace.ok() {
- traceRelease(trace)
- }
- incidlelocked(1)
- lock(&allpLock)
+
+ // Drop allpLock so we can take sched.lock.
+ unlock(&allpLock)
+
+ // Need to decrement number of idle locked M's (pretending that
+ // one more is running) before we take the P and resume.
+ // Otherwise the M from which we retake can exit the syscall,
+ // increment nmidle and report deadlock.
+ //
+ // Can't call incidlelocked once we setBlockOnExitSyscall, due
+ // to a lock ordering violation between sched.lock and _Gscan.
+ incidlelocked(-1)
+
+ // Try to prevent the P from continuing in the syscall, if it's in one at all.
+ thread, ok := setBlockOnExitSyscall(pp)
+ if !ok {
+ // Not in a syscall, or something changed out from under us.
+ goto done
 }
+
+ // Retake the P if it's there for more than 1 sysmon tick (at least 20us).
+ if syst := int64(pp.syscalltick); !sysretake && int64(pd.syscalltick) != syst {
+ pd.syscalltick = uint32(syst)
+ pd.syscallwhen = now
+ thread.resume()
+ goto done
+ }
+
+ // On the one hand we don't want to retake Ps if there is no other work to do,
+ // but on the other hand we want to retake them eventually
+ // because they can prevent the sysmon thread from deep sleep.
+ if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
+ thread.resume()
+ goto done
+ }
+
+ // Take the P. Note: because we have the scan bit, the goroutine
+ // is at worst stuck spinning in exitsyscall.
+ thread.takeP()
+ thread.resume()
+ n++
+
+ // Hand off the P for some other thread to run.
+ handoffp(pp)
+
+ // The P has been handed off to another thread, so risk of a false
+ // deadlock report while we hold onto it is gone.
+ done:
+ incidlelocked(1)
+ lock(&allpLock)
 }
 unlock(&allpLock)
 return uint32(n)
}
 
+// syscallingThread represents a thread in a system call that temporarily
+// cannot advance out of the system call.
+type syscallingThread struct {
+ gp *g
+ mp *m
+ pp *p
+ status uint32
+}
+
+// setBlockOnExitSyscall prevents pp's thread from advancing out of
+// exitsyscall. On success, returns the g/m/p state of the thread
+// and true. At that point, the caller owns the g/m/p links referenced,
+// the goroutine is in _Gsyscall, and it is prevented from transitioning
+// out of that state. On failure, it returns false, and none of these
+// guarantees are made.
+//
+// Callers must call resume on the resulting thread state once
+// they're done with the thread, otherwise it will remain blocked forever.
+//
+// This function races with state changes on pp, and thus may fail
+// if pp is not in a system call, or exits a system call concurrently
+// with this function. However, this function is safe to call without
+// any additional synchronization.
+func setBlockOnExitSyscall(pp *p) (syscallingThread, bool) {
+ if pp.status != _Prunning {
+ return syscallingThread{}, false
+ }
+ // Be very careful here, these reads are intentionally racy.
+ // Once we notice the G is in _Gsyscall, acquire its scan bit,
+ // and validate that it's still connected to the *same* M and P,
+ // we can actually get to work. Holding the scan bit will prevent
+ // the G from exiting the syscall.
+ //
+ // Our goal here is to interrupt long syscalls. If it turns out
+ // that we're wrong and the G switched to another syscall while
+ // we were trying to do this, that's completely fine. It's
+ // probably making more frequent syscalls and the typical
+ // preemption paths should be effective.
+ mp := pp.m.ptr()
+ if mp == nil {
+ // Nothing to do.
+ return syscallingThread{}, false
+ }
+ gp := mp.curg
+ if gp == nil {
+ // Nothing to do.
+ return syscallingThread{}, false + } + status := readgstatus(gp) &^ _Gscan + + // A goroutine is considered in a syscall, and may have a corresponding + // P, if it's in _Gsyscall *or* _Gdeadextra. In the latter case, it's an + // extra M goroutine. + if status != _Gsyscall && status != _Gdeadextra { + // Not in a syscall, nothing to do. + return syscallingThread{}, false + } + if !castogscanstatus(gp, status, status|_Gscan) { + // Not in _Gsyscall or _Gdeadextra anymore. Nothing to do. + return syscallingThread{}, false + } + if gp.m != mp || gp.m.p.ptr() != pp { + // This is not what we originally observed. Nothing to do. + casfrom_Gscanstatus(gp, status|_Gscan, status) + return syscallingThread{}, false + } + return syscallingThread{gp, mp, pp, status}, true +} + +// gcstopP unwires the P attached to the syscalling thread +// and moves it into the _Pgcstop state. +// +// The caller must be stopping the world. +func (s syscallingThread) gcstopP() { + assertLockHeld(&sched.lock) + + s.releaseP(_Pgcstop) + s.pp.gcStopTime = nanotime() + sched.stopwait-- +} + +// takeP unwires the P attached to the syscalling thread +// and moves it into the _Pidle state. +func (s syscallingThread) takeP() { + s.releaseP(_Pidle) +} + +// releaseP unwires the P from the syscalling thread, moving +// it to the provided state. Callers should prefer to use +// takeP and gcstopP. +func (s syscallingThread) releaseP(state uint32) { + if state != _Pidle && state != _Pgcstop { + throw("attempted to release P into a bad state") + } + trace := traceAcquire() + s.pp.m = 0 + s.mp.p = 0 + atomic.Store(&s.pp.status, state) + if trace.ok() { + trace.ProcSteal(s.pp) + traceRelease(trace) + } + sched.nGsyscallNoP.Add(1) + s.pp.syscalltick++ +} + +// resume allows a syscalling thread to advance beyond exitsyscall. +func (s syscallingThread) resume() { + casfrom_Gscanstatus(s.gp, s.status|_Gscan, s.status) +} + // Tell all goroutines that they have been preempted and they should stop. // This function is purely best-effort. It can fail to inform a goroutine if a // processor just started running it. @@ -6482,6 +6699,10 @@ func preemptone(pp *p) bool { if gp == nil || gp == mp.g0 { return false } + if readgstatus(gp)&^_Gscan == _Gsyscall { + // Don't bother trying to preempt a goroutine in a syscall. + return false + } gp.preempt = true @@ -6806,8 +7027,7 @@ func schedEnabled(gp *g) bool { func mput(mp *m) { assertLockHeld(&sched.lock) - mp.schedlink = sched.midle - sched.midle.set(mp) + sched.midle.push(unsafe.Pointer(mp)) sched.nmidle++ checkdead() } @@ -6820,14 +7040,34 @@ func mput(mp *m) { func mget() *m { assertLockHeld(&sched.lock) - mp := sched.midle.ptr() + mp := (*m)(sched.midle.pop()) if mp != nil { - sched.midle = mp.schedlink sched.nmidle-- } return mp } +// Try to get a specific m from midle list. Returns nil if it isn't on the +// midle list. +// +// sched.lock must be held. +// May run during STW, so write barriers are not allowed. +// +//go:nowritebarrierrec +func mgetSpecific(mp *m) *m { + assertLockHeld(&sched.lock) + + if mp.idleNode.prev == 0 && mp.idleNode.next == 0 { + // Not on the list. + return nil + } + + sched.midle.remove(unsafe.Pointer(mp)) + sched.nmidle-- + + return mp +} + // Put gp on the global runnable queue. // sched.lock must be held. // May run during STW, so write barriers are not allowed. 
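Editor's note: the core synchronization change in this CL is that sysmon (retake) and exitsyscall now coordinate through the goroutine's _Gscan bit, acquired with a CAS on the G status word, rather than through a CAS on a separate _Psyscall P state. The sketch below is a minimal, self-contained model of the shape of that handshake; the constant values and helper names (gSyscall, gScan, tryBlock, resume) are illustrative stand-ins, not the runtime's declarations, and all of the real code's M/P validation and trace bookkeeping is elided.

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// Illustrative stand-ins for the runtime's _Gsyscall and _Gscan values.
	const (
		gSyscall uint32 = 3
		gScan    uint32 = 0x1000
	)

	// tryBlock models setBlockOnExitSyscall's key step: a single CAS that
	// succeeds only while the status word reads "in a syscall". Holding the
	// scan bit is what keeps the other thread from advancing out of
	// exitsyscall.
	func tryBlock(status *atomic.Uint32) bool {
		return status.CompareAndSwap(gSyscall, gSyscall|gScan)
	}

	// resume models casfrom_Gscanstatus: drop the scan bit so the
	// syscalling thread may proceed.
	func resume(status *atomic.Uint32) {
		status.Store(gSyscall)
	}

	func main() {
		var status atomic.Uint32
		status.Store(gSyscall)

		if tryBlock(&status) {
			// While the scan bit is held, the holder may safely take the P
			// (takeP in this CL); the blocked thread cannot race with it.
			fmt.Println("scan bit held; safe to take the P")
			resume(&status)
		}
		fmt.Printf("final status: %#x\n", status.Load())
	}

If the CAS fails (the G left the syscall first), the caller simply gives up, which mirrors how both retake and exitsyscallTryGetP treat setBlockOnExitSyscall failure as "nothing to do".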
diff --git a/src/runtime/proc_test.go b/src/runtime/proc_test.go
index 3b606f62e43..b3084f4895f 100644
--- a/src/runtime/proc_test.go
+++ b/src/runtime/proc_test.go
@@ -5,13 +5,18 @@ package runtime_test
 
 import (
+ "bytes"
 "fmt"
 "internal/race"
 "internal/testenv"
+ "internal/trace"
+ "internal/trace/testtrace"
+ "io"
 "math"
 "net"
 "runtime"
 "runtime/debug"
+ "slices"
 "strings"
 "sync"
 "sync/atomic"
@@ -696,7 +701,6 @@ func BenchmarkCreateGoroutinesCapture(b *testing.B) {
 var wg sync.WaitGroup
 wg.Add(N)
 for i := 0; i < N; i++ {
- i := i
 go func() {
 if i >= N {
 b.Logf("bad") // just to capture b
@@ -1169,3 +1173,364 @@ func TestBigGOMAXPROCS(t *testing.T) {
 t.Errorf("output:\n%s\nwanted:\nunknown function: NonexistentTest", output)
 }
}
+
+type goroutineState struct {
+ G trace.GoID // This goroutine.
+ P trace.ProcID // Most recent P this goroutine ran on.
+ M trace.ThreadID // Most recent M this goroutine ran on.
+}
+
+func newGoroutineState(g trace.GoID) *goroutineState {
+ return &goroutineState{
+ G: g,
+ P: trace.NoProc,
+ M: trace.NoThread,
+ }
+}
+
+// TestTraceSTW verifies that goroutines continue running on the same M and P
+// after a STW.
+func TestTraceSTW(t *testing.T) {
+ // Across STW, the runtime attempts to keep goroutines running on the
+ // same P and the P running on the same M. It does this by keeping
+ // goroutines in the P's local runq, and remembering which M the P ran
+ // on before STW and preferring that M when restarting.
+ //
+ // This test verifies that affinity by analyzing a trace of testprog
+ // TraceSTW.
+ //
+ // The affinity across STW is best-effort, so we have to allow some
+ // failure rate; thus we test many times and ensure the error rate
+ // stays low.
+ //
+ // The expected affinity can fail for a variety of reasons. The most
+ // obvious is that while procresize assigns Ps back to their original
+ // M, startTheWorldWithSema calls wakep to start a spinning M. The
+ // spinning M may steal a goroutine from another P if that P is too
+ // slow to start.
+
+ if testing.Short() {
+ t.Skip("skipping in -short mode")
+ }
+
+ if runtime.NumCPU() < 4 {
+ t.Skip("This test sets GOMAXPROCS=4 and wants to avoid thread descheduling as much as possible. Skip on machines with fewer than 4 CPUs")
+ }
+
+ const runs = 50
+
+ var errors int
+ for i := range runs {
+ err := runTestTracesSTW(t, i)
+ if err != nil {
+ t.Logf("Run %d failed: %v", i, err)
+ errors++
+ }
+ }
+
+ pct := float64(errors) / float64(runs)
+ t.Logf("Errors: %d/%d = %f%%", errors, runs, 100*pct)
+ if pct > 0.25 {
+ t.Errorf("Error rate too high")
+ }
+}
+
+func runTestTracesSTW(t *testing.T, run int) (err error) {
+ t.Logf("Run %d", run)
+
+ // By default, TSAN sleeps for 1s at exit to allow background
+ // goroutines to race. This slows down execution for this test far too
+ // much, since we are running 50 iterations, so disable the sleep.
+ //
+ // Outside of race mode, GORACE does nothing.
+ buf := []byte(runTestProg(t, "testprog", "TraceSTW", "GORACE=atexit_sleep_ms=0"))
+
+ // We locally "fail" the run (return an error) if the trace exhibits
+ // unwanted scheduling, i.e., the target goroutines did not remain on
+ // the same P/M.
+ //
+ // We fail the entire test (t.Fatal) for other cases that should never
+ // occur, such as a trace parse error.
+ defer func() { + if err != nil || t.Failed() { + testtrace.Dump(t, fmt.Sprintf("TestTraceSTW-run%d", run), []byte(buf), false) + } + }() + + br, err := trace.NewReader(bytes.NewReader(buf)) + if err != nil { + t.Fatalf("NewReader got err %v want nil", err) + } + + var targetGoroutines []*goroutineState + findGoroutine := func(goid trace.GoID) *goroutineState { + for _, gs := range targetGoroutines { + if gs.G == goid { + return gs + } + } + return nil + } + findProc := func(pid trace.ProcID) *goroutineState { + for _, gs := range targetGoroutines { + if gs.P == pid { + return gs + } + } + return nil + } + + // 1. Find the goroutine IDs for the target goroutines. This will be in + // the StateTransition from NotExist. + // + // 2. Once found, track which M and P the target goroutines run on until... + // + // 3. Look for the "TraceSTW" "start" log message, where we commit the + // target goroutines' "before" M and P. + // + // N.B. We must do (1) and (2) together because the first target + // goroutine may start running before the second is created. +findStart: + for { + ev, err := br.ReadEvent() + if err == io.EOF { + // Reached the end of the trace without finding case (3). + t.Fatalf("Trace missing start log message") + } + if err != nil { + t.Fatalf("ReadEvent got err %v want nil", err) + } + t.Logf("Event: %s", ev.String()) + + switch ev.Kind() { + case trace.EventStateTransition: + st := ev.StateTransition() + if st.Resource.Kind != trace.ResourceGoroutine { + continue + } + + goid := st.Resource.Goroutine() + from, to := st.Goroutine() + + // Potentially case (1): Goroutine creation. + if from == trace.GoNotExist { + for sf := range st.Stack.Frames() { + if sf.Func == "main.traceSTWTarget" { + targetGoroutines = append(targetGoroutines, newGoroutineState(goid)) + t.Logf("Identified target goroutine id %d", goid) + } + + // Always break, the goroutine entrypoint is always the + // first frame. + break + } + } + + // Potentially case (2): Goroutine running. + if to == trace.GoRunning { + gs := findGoroutine(goid) + if gs == nil { + continue + } + gs.P = ev.Proc() + gs.M = ev.Thread() + t.Logf("G %d running on P %d M %d", gs.G, gs.P, gs.M) + } + case trace.EventLog: + // Potentially case (3): Start log event. + log := ev.Log() + if log.Category != "TraceSTW" { + continue + } + if log.Message != "start" { + t.Fatalf("Log message got %s want start", log.Message) + } + + // Found start point, move on to next stage. + t.Logf("Found start message") + break findStart + } + } + + t.Log("Target goroutines:") + for _, gs := range targetGoroutines { + t.Logf("%+v", gs) + } + + if len(targetGoroutines) != 2 { + t.Fatalf("len(targetGoroutines) got %d want 2", len(targetGoroutines)) + } + + for _, gs := range targetGoroutines { + if gs.P == trace.NoProc { + t.Fatalf("Goroutine %+v not running on a P", gs) + } + if gs.M == trace.NoThread { + t.Fatalf("Goroutine %+v not running on an M", gs) + } + } + + // The test continues until we see the "end" log message. + // + // What we want to observe is that the target goroutines run only on + // the original P and M. + // + // They will be stopped by STW [1], but should resume on the original P + // and M. + // + // However, this is best effort. For example, startTheWorld wakep's a + // spinning M. If the original M is slow to restart (e.g., due to poor + // kernel scheduling), the spinning M may legally steal the goroutine + // and run it instead. 
+ // + // In practice, we see this occur frequently on builders, likely + // because they are overcommitted on CPU. Thus, we instead check + // slightly more constrained properties: + // - The original P must run on the original M (if it runs at all). + // - The original P must run the original G before anything else, + // unless that G has already run elsewhere. + // + // This allows a spinning M to steal the G from a slow-to-start M, but + // does not allow the original P to just flat out run something + // completely different from expected. + // + // Note this is still somewhat racy: the spinning M may steal the + // target G, but before it marks the target G as running, the original + // P runs an alternative G. This test will fail that case, even though + // it is legitimate. We allow that failure because such a race should + // be very rare, particularly because the test process usually has no + // other runnable goroutines. + // + // [1] This is slightly fragile because there is a small window between + // the "start" log and actual STW during which the target goroutines + // could legitimately migrate. + var stwSeen bool + var pRunning []trace.ProcID + var gRunning []trace.GoID +findEnd: + for { + ev, err := br.ReadEvent() + if err == io.EOF { + break + } + if err != nil { + t.Fatalf("ReadEvent got err %v want nil", err) + } + t.Logf("Event: %s", ev.String()) + + switch ev.Kind() { + case trace.EventStateTransition: + st := ev.StateTransition() + switch st.Resource.Kind { + case trace.ResourceProc: + p := st.Resource.Proc() + _, to := st.Proc() + + // Proc running. Ensure it didn't migrate. + if to == trace.ProcRunning { + gs := findProc(p) + if gs == nil { + continue + } + + if slices.Contains(pRunning, p) { + // Only check the first + // transition to running. + // Afterwards it is free to + // migrate anywhere. + continue + } + pRunning = append(pRunning, p) + + m := ev.Thread() + if m != gs.M { + t.Logf("Proc %d running on M %d want M %d", p, m, gs.M) + return fmt.Errorf("P did not remain on M") + } + } + case trace.ResourceGoroutine: + goid := st.Resource.Goroutine() + _, to := st.Goroutine() + + // Goroutine running. Ensure it didn't migrate. + if to == trace.GoRunning { + p := ev.Proc() + m := ev.Thread() + + gs := findGoroutine(goid) + if gs == nil { + // This isn't a target + // goroutine. Is it a target P? + // That shouldn't run anything + // other than the target G. + gs = findProc(p) + if gs == nil { + continue + } + + if slices.Contains(gRunning, gs.G) { + // This P's target G ran elsewhere. This probably + // means that this P was slow to start, so + // another P stole it. That isn't ideal, but + // we'll allow it. + continue + } + + t.Logf("Goroutine %d running on P %d M %d want this P to run G %d", goid, p, m, gs.G) + return fmt.Errorf("P ran incorrect goroutine") + } + + if !slices.Contains(gRunning, goid) { + gRunning = append(gRunning, goid) + } + + if p != gs.P || m != gs.M { + t.Logf("Goroutine %d running on P %d M %d want P %d M %d", goid, p, m, gs.P, gs.M) + // We don't want this to occur, + // but allow it for cases of + // bad kernel scheduling. See + // "The test continues" comment + // above. + } + } + } + case trace.EventLog: + // Potentially end log event. + log := ev.Log() + if log.Category != "TraceSTW" { + continue + } + if log.Message != "end" { + t.Fatalf("Log message got %s want end", log.Message) + } + + // Found end point. 
+ t.Logf("Found end message") + break findEnd + case trace.EventRangeBegin: + r := ev.Range() + if r.Name == "stop-the-world (read mem stats)" { + // Note when we see the STW begin. This is not + // load bearing; it's purpose is simply to fail + // the test if we manage to remove the STW from + // ReadMemStat, so we remember to change this + // test to add some new source of STW. + stwSeen = true + } + } + } + + if !stwSeen { + t.Fatal("No STW in the test trace") + } + + return nil +} + +func TestMexitSTW(t *testing.T) { + got := runTestProg(t, "testprog", "mexitSTW") + want := "OK\n" + if got != want { + t.Fatalf("expected %q, but got:\n%s", want, got) + } +} diff --git a/src/runtime/rt0_freebsd_riscv64.s b/src/runtime/rt0_freebsd_riscv64.s index dc46b704766..f3fb4ffbe75 100644 --- a/src/runtime/rt0_freebsd_riscv64.s +++ b/src/runtime/rt0_freebsd_riscv64.s @@ -12,100 +12,8 @@ TEXT _rt0_riscv64_freebsd(SB),NOSPLIT|NOFRAME,$0 // When building with -buildmode=c-shared, this symbol is called when the shared // library is loaded. -TEXT _rt0_riscv64_freebsd_lib(SB),NOSPLIT,$224 - // Preserve callee-save registers, along with X1 (LR). - MOV X1, (8*3)(X2) - MOV X8, (8*4)(X2) - MOV X9, (8*5)(X2) - MOV X18, (8*6)(X2) - MOV X19, (8*7)(X2) - MOV X20, (8*8)(X2) - MOV X21, (8*9)(X2) - MOV X22, (8*10)(X2) - MOV X23, (8*11)(X2) - MOV X24, (8*12)(X2) - MOV X25, (8*13)(X2) - MOV X26, (8*14)(X2) - MOV g, (8*15)(X2) - MOVD F8, (8*16)(X2) - MOVD F9, (8*17)(X2) - MOVD F18, (8*18)(X2) - MOVD F19, (8*19)(X2) - MOVD F20, (8*20)(X2) - MOVD F21, (8*21)(X2) - MOVD F22, (8*22)(X2) - MOVD F23, (8*23)(X2) - MOVD F24, (8*24)(X2) - MOVD F25, (8*25)(X2) - MOVD F26, (8*26)(X2) - MOVD F27, (8*27)(X2) - - // Initialize g as nil in case of using g later e.g. sigaction in cgo_sigaction.go - MOV X0, g - - MOV A0, _rt0_riscv64_freebsd_lib_argc<>(SB) - MOV A1, _rt0_riscv64_freebsd_lib_argv<>(SB) - - // Synchronous initialization. - MOV $runtime·libpreinit(SB), T0 - JALR RA, T0 - - // Create a new thread to do the runtime initialization and return. - MOV _cgo_sys_thread_create(SB), T0 - BEQZ T0, nocgo - MOV $_rt0_riscv64_freebsd_lib_go(SB), A0 - MOV $0, A1 - JALR RA, T0 - JMP restore - -nocgo: - MOV $0x800000, A0 // stacksize = 8192KB - MOV $_rt0_riscv64_freebsd_lib_go(SB), A1 - MOV A0, 8(X2) - MOV A1, 16(X2) - MOV $runtime·newosproc0(SB), T0 - JALR RA, T0 - -restore: - // Restore callee-save registers, along with X1 (LR). 
- MOV (8*3)(X2), X1 - MOV (8*4)(X2), X8 - MOV (8*5)(X2), X9 - MOV (8*6)(X2), X18 - MOV (8*7)(X2), X19 - MOV (8*8)(X2), X20 - MOV (8*9)(X2), X21 - MOV (8*10)(X2), X22 - MOV (8*11)(X2), X23 - MOV (8*12)(X2), X24 - MOV (8*13)(X2), X25 - MOV (8*14)(X2), X26 - MOV (8*15)(X2), g - MOVD (8*16)(X2), F8 - MOVD (8*17)(X2), F9 - MOVD (8*18)(X2), F18 - MOVD (8*19)(X2), F19 - MOVD (8*20)(X2), F20 - MOVD (8*21)(X2), F21 - MOVD (8*22)(X2), F22 - MOVD (8*23)(X2), F23 - MOVD (8*24)(X2), F24 - MOVD (8*25)(X2), F25 - MOVD (8*26)(X2), F26 - MOVD (8*27)(X2), F27 - - RET - -TEXT _rt0_riscv64_freebsd_lib_go(SB),NOSPLIT,$0 - MOV _rt0_riscv64_freebsd_lib_argc<>(SB), A0 - MOV _rt0_riscv64_freebsd_lib_argv<>(SB), A1 - MOV $runtime·rt0_go(SB), T0 - JALR ZERO, T0 - -DATA _rt0_riscv64_freebsd_lib_argc<>(SB)/8, $0 -GLOBL _rt0_riscv64_freebsd_lib_argc<>(SB),NOPTR, $8 -DATA _rt0_riscv64_freebsd_lib_argv<>(SB)/8, $0 -GLOBL _rt0_riscv64_freebsd_lib_argv<>(SB),NOPTR, $8 +TEXT _rt0_riscv64_freebsd_lib(SB),NOSPLIT,$0 + JMP _rt0_riscv64_lib(SB) TEXT main(SB),NOSPLIT|NOFRAME,$0 MOV $runtime·rt0_go(SB), T0 diff --git a/src/runtime/rt0_linux_riscv64.s b/src/runtime/rt0_linux_riscv64.s index d6b8ac85dca..4139c9bcecd 100644 --- a/src/runtime/rt0_linux_riscv64.s +++ b/src/runtime/rt0_linux_riscv64.s @@ -11,100 +11,8 @@ TEXT _rt0_riscv64_linux(SB),NOSPLIT|NOFRAME,$0 // When building with -buildmode=c-shared, this symbol is called when the shared // library is loaded. -TEXT _rt0_riscv64_linux_lib(SB),NOSPLIT,$224 - // Preserve callee-save registers, along with X1 (LR). - MOV X1, (8*3)(X2) - MOV X8, (8*4)(X2) - MOV X9, (8*5)(X2) - MOV X18, (8*6)(X2) - MOV X19, (8*7)(X2) - MOV X20, (8*8)(X2) - MOV X21, (8*9)(X2) - MOV X22, (8*10)(X2) - MOV X23, (8*11)(X2) - MOV X24, (8*12)(X2) - MOV X25, (8*13)(X2) - MOV X26, (8*14)(X2) - MOV g, (8*15)(X2) - MOVD F8, (8*16)(X2) - MOVD F9, (8*17)(X2) - MOVD F18, (8*18)(X2) - MOVD F19, (8*19)(X2) - MOVD F20, (8*20)(X2) - MOVD F21, (8*21)(X2) - MOVD F22, (8*22)(X2) - MOVD F23, (8*23)(X2) - MOVD F24, (8*24)(X2) - MOVD F25, (8*25)(X2) - MOVD F26, (8*26)(X2) - MOVD F27, (8*27)(X2) - - // Initialize g as nil in case of using g later e.g. sigaction in cgo_sigaction.go - MOV X0, g - - MOV A0, _rt0_riscv64_linux_lib_argc<>(SB) - MOV A1, _rt0_riscv64_linux_lib_argv<>(SB) - - // Synchronous initialization. - MOV $runtime·libpreinit(SB), T0 - JALR RA, T0 - - // Create a new thread to do the runtime initialization and return. - MOV _cgo_sys_thread_create(SB), T0 - BEQZ T0, nocgo - MOV $_rt0_riscv64_linux_lib_go(SB), A0 - MOV $0, A1 - JALR RA, T0 - JMP restore - -nocgo: - MOV $0x800000, A0 // stacksize = 8192KB - MOV $_rt0_riscv64_linux_lib_go(SB), A1 - MOV A0, 8(X2) - MOV A1, 16(X2) - MOV $runtime·newosproc0(SB), T0 - JALR RA, T0 - -restore: - // Restore callee-save registers, along with X1 (LR). 
- MOV (8*3)(X2), X1
- MOV (8*4)(X2), X8
- MOV (8*5)(X2), X9
- MOV (8*6)(X2), X18
- MOV (8*7)(X2), X19
- MOV (8*8)(X2), X20
- MOV (8*9)(X2), X21
- MOV (8*10)(X2), X22
- MOV (8*11)(X2), X23
- MOV (8*12)(X2), X24
- MOV (8*13)(X2), X25
- MOV (8*14)(X2), X26
- MOV (8*15)(X2), g
- MOVD (8*16)(X2), F8
- MOVD (8*17)(X2), F9
- MOVD (8*18)(X2), F18
- MOVD (8*19)(X2), F19
- MOVD (8*20)(X2), F20
- MOVD (8*21)(X2), F21
- MOVD (8*22)(X2), F22
- MOVD (8*23)(X2), F23
- MOVD (8*24)(X2), F24
- MOVD (8*25)(X2), F25
- MOVD (8*26)(X2), F26
- MOVD (8*27)(X2), F27
-
- RET
-
-TEXT _rt0_riscv64_linux_lib_go(SB),NOSPLIT,$0
- MOV _rt0_riscv64_linux_lib_argc<>(SB), A0
- MOV _rt0_riscv64_linux_lib_argv<>(SB), A1
- MOV $runtime·rt0_go(SB), T0
- JALR ZERO, T0
-
-DATA _rt0_riscv64_linux_lib_argc<>(SB)/8, $0
-GLOBL _rt0_riscv64_linux_lib_argc<>(SB),NOPTR, $8
-DATA _rt0_riscv64_linux_lib_argv<>(SB)/8, $0
-GLOBL _rt0_riscv64_linux_lib_argv<>(SB),NOPTR, $8
+TEXT _rt0_riscv64_linux_lib(SB),NOSPLIT,$0
+ JMP _rt0_riscv64_lib(SB)
 
 TEXT main(SB),NOSPLIT|NOFRAME,$0
diff --git a/src/runtime/runtime-gdb.py b/src/runtime/runtime-gdb.py
index 345a59605e8..9d2446a1ebe 100644
--- a/src/runtime/runtime-gdb.py
+++ b/src/runtime/runtime-gdb.py
@@ -46,11 +46,13 @@ G_MORIBUND_UNUSED = read_runtime_const("'runtime._Gmoribund_unused'", 5)
G_DEAD = read_runtime_const("'runtime._Gdead'", 6)
G_ENQUEUE_UNUSED = read_runtime_const("'runtime._Genqueue_unused'", 7)
G_COPYSTACK = read_runtime_const("'runtime._Gcopystack'", 8)
+G_DEADEXTRA = read_runtime_const("'runtime._Gdeadextra'", 11)
G_SCAN = read_runtime_const("'runtime._Gscan'", 0x1000)
G_SCANRUNNABLE = G_SCAN+G_RUNNABLE
G_SCANRUNNING = G_SCAN+G_RUNNING
G_SCANSYSCALL = G_SCAN+G_SYSCALL
G_SCANWAITING = G_SCAN+G_WAITING
+G_SCANEXTRA = G_SCAN+G_DEADEXTRA
 
sts = {
 G_IDLE: 'idle',
@@ -62,11 +64,13 @@ sts = {
 G_DEAD: 'dead',
 G_ENQUEUE_UNUSED: 'enqueue',
 G_COPYSTACK: 'copystack',
+ G_DEADEXTRA: 'extra',
 G_SCAN: 'scan',
 G_SCANRUNNABLE: 'runnable+s',
 G_SCANRUNNING: 'running+s',
 G_SCANSYSCALL: 'syscall+s',
 G_SCANWAITING: 'waiting+s',
+ G_SCANEXTRA: 'extra+s',
}
 
@@ -524,7 +528,7 @@ class GoroutinesCmd(gdb.Command):
 # args = gdb.string_to_argv(arg)
 vp = gdb.lookup_type('void').pointer()
 for ptr in SliceValue(gdb.parse_and_eval("'runtime.allgs'")):
- if ptr['atomicstatus']['value'] == G_DEAD:
+ if ptr['atomicstatus']['value'] in [G_DEAD, G_DEADEXTRA]:
 continue
 s = ' '
 if ptr['m']:
@@ -549,7 +553,7 @@ def find_goroutine(goid):
 """
 vp = gdb.lookup_type('void').pointer()
 for ptr in SliceValue(gdb.parse_and_eval("'runtime.allgs'")):
- if ptr['atomicstatus']['value'] == G_DEAD:
+ if ptr['atomicstatus']['value'] in [G_DEAD, G_DEADEXTRA]:
 continue
 if ptr['goid'] == goid:
 break
diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go
index 15b546783b5..43e4c142362 100644
--- a/src/runtime/runtime1.go
+++ b/src/runtime/runtime1.go
@@ -8,7 +8,7 @@ import (
 "internal/bytealg"
 "internal/goarch"
 "internal/runtime/atomic"
- "internal/runtime/strconv"
+ "internal/strconv"
 "unsafe"
)
 
@@ -212,10 +212,6 @@ func check() {
 throw("bad unsafe.Sizeof y1")
 }
 
- if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
- throw("bad timediv")
- }
-
 var z uint32
 z = 1
 if !atomic.Cas(&z, 1, 2) {
@@ -532,17 +528,17 @@ func parsegodebug(godebug string, seen map[string]bool) {
 // is int, not int32, and should only be updated
 // if specified in GODEBUG.
 if seen == nil && key == "memprofilerate" {
- if n, ok := strconv.Atoi(value); ok {
+ if n, err := strconv.Atoi(value); err == nil {
 MemProfileRate = n
 }
 } else {
 for _, v := range dbgvars {
 if v.name == key {
- if n, ok := strconv.Atoi32(value); ok {
+ if n, err := strconv.ParseInt(value, 10, 32); err == nil {
 if seen == nil && v.value != nil {
- *v.value = n
+ *v.value = int32(n)
 } else if v.atomic != nil {
- v.atomic.Store(n)
+ v.atomic.Store(int32(n))
 }
 }
 }
@@ -578,7 +574,7 @@ func setTraceback(level string) {
 fallthrough
 default:
 t = tracebackAll
- if n, ok := strconv.Atoi(level); ok && n == int(uint32(n)) {
+ if n, err := strconv.Atoi(level); err == nil && n == int(uint32(n)) {
 t |= uint32(n) << tracebackShift
 }
 }
@@ -593,35 +589,6 @@ func setTraceback(level string) {
 atomic.Store(&traceback_cache, t)
}
 
-// Poor mans 64-bit division.
-// This is a very special function, do not use it if you are not sure what you are doing.
-// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
-// Handles overflow in a time-specific manner.
-// This keeps us within no-split stack limits on 32-bit processors.
-//
-//go:nosplit
-func timediv(v int64, div int32, rem *int32) int32 {
-	res := int32(0)
-	for bit := 30; bit >= 0; bit-- {
-		if v >= int64(div)<<uint(bit) {
-			v = v - (int64(div) << uint(bit))
-			// Before this for loop, res was 0, thus all these
-			// power of 2 increments are now just bitsets.
-			res |= 1 << uint(bit)
-		}
-	}
-	if v >= int64(div) {
-		if rem != nil {
-			*rem = 0
-		}
-		return 0x7fffffff
-	}
-	if rem != nil {
-		*rem = int32(v)
-	}
-	return res
-}
-
 // Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.
 
 //go:nosplit
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 6016c6fde05..6c955460d4f 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -42,12 +42,16 @@ const (
 
 // _Grunning means this goroutine may execute user code. The
 // stack is owned by this goroutine. It is not on a run queue.
- // It is assigned an M and a P (g.m and g.m.p are valid).
+ // It is assigned an M (g.m is valid) and it usually has a P
+ // (g.m.p is valid), but there are small windows of time where
+ // it might not, namely upon entering and exiting _Gsyscall.
 _Grunning // 2
 
 // _Gsyscall means this goroutine is executing a system call.
 // It is not executing user code. The stack is owned by this
 // goroutine. It is not on a run queue. It is assigned an M.
+ // It may have a P attached, but it does not own it. Code
+ // executing in this state must not touch g.m.p.
 _Gsyscall // 3
 
 // _Gwaiting means this goroutine is blocked in the runtime.
@@ -90,6 +94,10 @@ const (
 // _Gleaked represents a leaked goroutine caught by the GC.
 _Gleaked // 10
 
+ // _Gdeadextra is a _Gdead goroutine that's attached to an extra M
+ // used for cgo callbacks.
+ _Gdeadextra // 11
+
 // _Gscan combined with one of the above states other than
 // _Grunning indicates that GC is scanning the stack. The
 // goroutine is not executing user code and the stack is owned
@@ -108,6 +116,7 @@ const (
 _Gscanwaiting = _Gscan + _Gwaiting // 0x1004
 _Gscanpreempted = _Gscan + _Gpreempted // 0x1009
 _Gscanleaked = _Gscan + _Gleaked // 0x100a
+ _Gscandeadextra = _Gscan + _Gdeadextra // 0x100b
)
 
const (
@@ -126,22 +135,15 @@ const (
 // run user code or the scheduler. Only the M that owns this P
 // is allowed to change the P's status from _Prunning. The M
 // may transition the P to _Pidle (if it has no more work to
- // do), _Psyscall (when entering a syscall), or _Pgcstop (to
- // halt for the GC). The M may also hand ownership of the P
- // off directly to another M (e.g., to schedule a locked G).
+ // do), or _Pgcstop (to halt for the GC). The M may also hand
+ // ownership of the P off directly to another M (for example,
+ // to schedule a locked G).
 _Prunning
 
- // _Psyscall means a P is not running user code. It has
- // affinity to an M in a syscall but is not owned by it and
- // may be stolen by another M. This is similar to _Pidle but
- // uses lightweight transitions and maintains M affinity.
- //
- // Leaving _Psyscall must be done with a CAS, either to steal
- // or retake the P. Note that there's an ABA hazard: even if
- // an M successfully CASes its original P back to _Prunning
- // after a syscall, it must understand the P may have been
- // used by another M in the interim.
- _Psyscall
+ // _Psyscall_unused is a now-defunct state for a P. A P is
+ // identified as "in a system call" by looking at the goroutine's
+ // state.
+ _Psyscall_unused
 
 // _Pgcstop means a P is halted for STW and owned by the M
 // that stopped the world. The M that stopped the world
@@ -615,18 +617,27 @@ type m struct {
 morebuf gobuf // gobuf arg to morestack
 divmod uint32 // div/mod denominator for arm - known to liblink (cmd/internal/obj/arm/obj5.go)
 
- // Fields not known to debuggers.
- procid uint64 // for debuggers, but offset not hard-coded
- gsignal *g // signal-handling g
- goSigStack gsignalStack // Go-allocated signal handling stack
- sigmask sigset // storage for saved signal mask
- tls [tlsSlots]uintptr // thread-local storage (for x86 extern register)
- mstartfn func()
- curg *g // current running goroutine
- caughtsig guintptr // goroutine running during fatal signal
- p puintptr // attached p for executing go code (nil if not executing go code)
- nextp puintptr
- oldp puintptr // the p that was attached before executing a syscall
+ // Fields whose offsets are not known to debuggers.
+
+ procid uint64 // for debuggers, but offset not hard-coded
+ gsignal *g // signal-handling g
+ goSigStack gsignalStack // Go-allocated signal handling stack
+ sigmask sigset // storage for saved signal mask
+ tls [tlsSlots]uintptr // thread-local storage (for x86 extern register)
+ mstartfn func()
+ curg *g // current running goroutine
+ caughtsig guintptr // goroutine running during fatal signal
+
+ // p is the currently attached P for executing Go code, nil if not executing user Go code.
+ //
+ // A non-nil p implies exclusive ownership of the P, unless curg is in _Gsyscall.
+ // In _Gsyscall the scheduler may mutate this instead. The point of synchronization
+ // is the _Gscan bit on curg's status. The scheduler must arrange to prevent curg
+ // from transitioning out of _Gsyscall if it intends to mutate p.
+ p puintptr
+
+ nextp puintptr // The next P to install before executing. Implies exclusive ownership of this P.
+ oldp puintptr // The P that was attached before executing a syscall.
 id int64
 mallocing int32
 throwing throwType
@@ -654,6 +665,7 @@ type m struct {
 park note
 alllink *m // on allm
 schedlink muintptr
+ idleNode listNodeManual
 lockedg guintptr
 createstack [32]uintptr // stack that created this thread, it's used for StackRecord.Stack0, so it must align with it.
 lockedExt uint32 // tracking for external LockOSThread
@@ -704,6 +716,9 @@ type m struct {
 // Up to 10 locks held by this m, maintained by the lock ranking code.
 locksHeldLen int
 locksHeld [10]heldLockInfo
+
+ // self points to this M until mexit clears it, after which get returns nil.
+ self mWeakPointer
}
 
const mRedZoneSize = (16 << 3) * asanenabledBit // redZoneSize(2048)
@@ -718,6 +733,37 @@ type mPadded struct {
 _ [(1 - goarch.IsWasm) * (2048 - mallocHeaderSize - mRedZoneSize - unsafe.Sizeof(m{}))]byte
}
 
+// mWeakPointer is a "weak" pointer to an M. A weak pointer for each M is
+// available as m.self. Users may copy mWeakPointer arbitrarily, and get will
+// return the M if it is still live, or nil after mexit.
+//
+// The zero value is treated as a nil pointer.
+//
+// Note that get may race with M exit. A successful get will keep the m object
+// alive, but the M itself may be exited and thus not actually usable.
+type mWeakPointer struct {
+ m *atomic.Pointer[m]
+}
+
+func newMWeakPointer(mp *m) mWeakPointer {
+ w := mWeakPointer{m: new(atomic.Pointer[m])}
+ w.m.Store(mp)
+ return w
+}
+
+func (w mWeakPointer) get() *m {
+ if w.m == nil {
+ return nil
+ }
+ return w.m.Load()
+}
+
+// clear sets the weak pointer to nil. It cannot be used on zero value
+// mWeakPointers.
+func (w mWeakPointer) clear() {
+ w.m.Store(nil)
+}
+
type p struct {
 id int32
 status uint32 // one of pidle/prunning/...
@@ -730,6 +776,17 @@ type p struct {
 pcache pageCache
 raceprocctx uintptr
 
+ // oldm is the previous m this p ran on.
+ //
+ // We are not associated with this m, so we have no control over its
+ // lifecycle. This value is an m.self object which points to the m
+ // until the m exits.
+ //
+ // Note that this m may be idle, running, or exiting. It should only be
+ // used with mgetSpecific, which will take ownership of the m only if
+ // it is idle.
+ oldm mWeakPointer
+
 deferpool []*_defer // pool of available defer structs (see panic.go)
 deferpoolbuf [32]*_defer
 
@@ -782,7 +839,7 @@ type p struct {
 // Per-P GC state
 gcAssistTime int64 // Nanoseconds in assistAlloc
- gcFractionalMarkTime int64 // Nanoseconds in fractional mark worker (atomic)
+ gcFractionalMarkTime atomic.Int64 // Nanoseconds in fractional mark worker
 
 // limiterEvent tracks events for the GC CPU limiter.
 limiterEvent limiterEvent
@@ -864,7 +921,7 @@ type schedt struct {
 // When increasing nmidle, nmidlelocked, nmsys, or nmfreed, be
 // sure to call checkdead().
 
- midle muintptr // idle m's waiting for work
+ midle listHeadManual // idle m's waiting for work
 nmidle int32 // number of idle m's waiting for work
 nmidlelocked int32 // number of locked m's waiting for work
 mnext int64 // number of m's that have been created and next M ID
@@ -998,7 +1055,7 @@ const (
type _func struct {
 sys.NotInHeap // Only in static data
 
- entryOff uint32 // start pc, as offset from moduledata.text/pcHeader.textStart
+ entryOff uint32 // start pc, as offset from moduledata.text
 nameOff int32 // function name, as index into moduledata.funcnametab.
 
 args int32 // in/out args size
@@ -1347,7 +1404,7 @@ var (
 // be atomic. Length may change at safe points.
 //
 // Each P must update only its own bit. In order to maintain
- // consistency, a P going idle must the idle mask simultaneously with
+ // consistency, a P going idle must set the idle mask simultaneously with
 // updates to the idle P list under the sched.lock, otherwise a racing
 // pidleget may clear the mask before pidleput sets the mask,
 // corrupting the bitmap.
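Editor's note: p.oldm depends on the copy semantics of mWeakPointer above: every copy shares one heap-allocated atomic cell, so when mexit clears m.self, the copy a P stashed in p.oldm observes the clear as well. Below is a minimal stand-alone model of that shared-cell design, using hypothetical names (weakRef, machine) rather than the runtime's:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// weakRef models mWeakPointer: copies share a single atomic cell, so
	// clearing the cell invalidates every copy at once.
	type weakRef[T any] struct {
		cell *atomic.Pointer[T]
	}

	func newWeakRef[T any](v *T) weakRef[T] {
		w := weakRef[T]{cell: new(atomic.Pointer[T])}
		w.cell.Store(v)
		return w
	}

	// get returns the referent, or nil if it was cleared or if w is the
	// zero value.
	func (w weakRef[T]) get() *T {
		if w.cell == nil {
			return nil
		}
		return w.cell.Load()
	}

	func (w weakRef[T]) clear() { w.cell.Store(nil) }

	type machine struct{ id int64 }

	func main() {
		mp := &machine{id: 1}
		self := newWeakRef(mp) // as allocm would set m.self
		oldm := self           // as acquirepNoTrace copies it into p.oldm

		fmt.Println(oldm.get().id) // 1: the M is still live
		self.clear()               // as mexit does
		fmt.Println(oldm.get())    // <nil>: the copy observes the clear
	}

As the mWeakPointer comment notes, a successful get only keeps the object alive; in the runtime, mgetSpecific must still verify the M is actually idle before using it.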
diff --git a/src/runtime/runtime_test.go b/src/runtime/runtime_test.go index 6c628f8903c..c1c63e3ceaf 100644 --- a/src/runtime/runtime_test.go +++ b/src/runtime/runtime_test.go @@ -6,7 +6,6 @@ package runtime_test import ( "flag" - "fmt" "internal/asan" "internal/cpu" "internal/msan" @@ -498,81 +497,6 @@ func TestVersion(t *testing.T) { } } -func TestTimediv(t *testing.T) { - for _, tc := range []struct { - num int64 - div int32 - ret int32 - rem int32 - }{ - { - num: 8, - div: 2, - ret: 4, - rem: 0, - }, - { - num: 9, - div: 2, - ret: 4, - rem: 1, - }, - { - // Used by runtime.check. - num: 12345*1000000000 + 54321, - div: 1000000000, - ret: 12345, - rem: 54321, - }, - { - num: 1<<32 - 1, - div: 2, - ret: 1<<31 - 1, // no overflow. - rem: 1, - }, - { - num: 1 << 32, - div: 2, - ret: 1<<31 - 1, // overflow. - rem: 0, - }, - { - num: 1 << 40, - div: 2, - ret: 1<<31 - 1, // overflow. - rem: 0, - }, - { - num: 1<<40 + 1, - div: 1 << 10, - ret: 1 << 30, - rem: 1, - }, - } { - name := fmt.Sprintf("%d div %d", tc.num, tc.div) - t.Run(name, func(t *testing.T) { - // Double check that the inputs make sense using - // standard 64-bit division. - ret64 := tc.num / int64(tc.div) - rem64 := tc.num % int64(tc.div) - if ret64 != int64(int32(ret64)) { - // Simulate timediv overflow value. - ret64 = 1<<31 - 1 - rem64 = 0 - } - if ret64 != int64(tc.ret) { - t.Errorf("%d / %d got ret %d rem %d want ret %d rem %d", tc.num, tc.div, ret64, rem64, tc.ret, tc.rem) - } - - var rem int32 - ret := Timediv(tc.num, tc.div, &rem) - if ret != tc.ret || rem != tc.rem { - t.Errorf("timediv %d / %d got ret %d rem %d want ret %d rem %d", tc.num, tc.div, ret, rem, tc.ret, tc.rem) - } - }) - } -} - func BenchmarkProcYield(b *testing.B) { benchN := func(n uint32) func(*testing.B) { return func(b *testing.B) { diff --git a/src/runtime/string.go b/src/runtime/string.go index 3726d9235bf..15b3868cbc0 100644 --- a/src/runtime/string.go +++ b/src/runtime/string.go @@ -9,8 +9,8 @@ import ( "internal/bytealg" "internal/goarch" "internal/runtime/math" - "internal/runtime/strconv" "internal/runtime/sys" + "internal/strconv" "unsafe" ) @@ -420,11 +420,11 @@ func parseByteCount(s string) (int64, bool) { // Handle the easy non-suffix case. last := s[len(s)-1] if last >= '0' && last <= '9' { - n, ok := strconv.Atoi64(s) - if !ok || n < 0 { + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || n < 0 { return 0, false } - return n, ok + return n, true } // Failing a trailing digit, this must always end in 'B'. // Also at this point there must be at least one digit before @@ -435,11 +435,11 @@ func parseByteCount(s string) (int64, bool) { // The one before that must always be a digit or 'i'. if c := s[len(s)-2]; c >= '0' && c <= '9' { // Trivial 'B' suffix. - n, ok := strconv.Atoi64(s[:len(s)-1]) - if !ok || n < 0 { + n, err := strconv.ParseInt(s[:len(s)-1], 10, 64) + if err != nil || n < 0 { return 0, false } - return n, ok + return n, true } else if c != 'i' { return 0, false } @@ -466,8 +466,8 @@ func parseByteCount(s string) (int64, bool) { for i := 0; i < power; i++ { m *= 1024 } - n, ok := strconv.Atoi64(s[:len(s)-3]) - if !ok || n < 0 { + n, err := strconv.ParseInt(s[:len(s)-3], 10, 64) + if err != nil || n < 0 { return 0, false } un := uint64(n) diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go index 20fc1c59ad9..d5a35d15b25 100644 --- a/src/runtime/stubs.go +++ b/src/runtime/stubs.go @@ -274,7 +274,16 @@ func reflectcall(stackArgsType *_type, fn, stackArgs unsafe.Pointer, stackArgsSi // See go.dev/issue/67401. 
// //go:linkname procyield -func procyield(cycles uint32) +//go:nosplit +func procyield(cycles uint32) { + if cycles == 0 { + return + } + procyieldAsm(cycles) +} + +// procyieldAsm is the assembly implementation of procyield. +func procyieldAsm(cycles uint32) type neverCallThisFunction struct{} diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go index 62ad8d13611..3a814cd2032 100644 --- a/src/runtime/symtab.go +++ b/src/runtime/symtab.go @@ -374,13 +374,19 @@ func (f *_func) funcInfo() funcInfo { // pcHeader holds data used by the pclntab lookups. type pcHeader struct { - magic uint32 // 0xFFFFFFF1 - pad1, pad2 uint8 // 0,0 - minLC uint8 // min instruction size - ptrSize uint8 // size of a ptr in bytes - nfunc int // number of functions in the module - nfiles uint // number of entries in the file tab - textStart uintptr // base for function entry PC offsets in this module, equal to moduledata.text + magic uint32 // 0xFFFFFFF1 + pad1, pad2 uint8 // 0,0 + minLC uint8 // min instruction size + ptrSize uint8 // size of a ptr in bytes + nfunc int // number of functions in the module + nfiles uint // number of entries in the file tab + + // The next field used to be textStart. This is no longer stored + // as it requires a relocation. Code should use the moduledata text + // field instead. This unused field can be removed in coordination + // with Delve. + _ uintptr + funcnameOffset uintptr // offset to the funcnametab variable from pcHeader cuOffset uintptr // offset to the cutab variable from pcHeader filetabOffset uintptr // offset to the filetab variable from pcHeader @@ -618,10 +624,9 @@ func moduledataverify1(datap *moduledata) { // Check that the pclntab's format is valid. hdr := datap.pcHeader if hdr.magic != 0xfffffff1 || hdr.pad1 != 0 || hdr.pad2 != 0 || - hdr.minLC != sys.PCQuantum || hdr.ptrSize != goarch.PtrSize || hdr.textStart != datap.text { + hdr.minLC != sys.PCQuantum || hdr.ptrSize != goarch.PtrSize { println("runtime: pcHeader: magic=", hex(hdr.magic), "pad1=", hdr.pad1, "pad2=", hdr.pad2, - "minLC=", hdr.minLC, "ptrSize=", hdr.ptrSize, "pcHeader.textStart=", hex(hdr.textStart), - "text=", hex(datap.text), "pluginpath=", datap.pluginpath) + "minLC=", hdr.minLC, "ptrSize=", hdr.ptrSize, "pluginpath=", datap.pluginpath) throw("invalid function symbol table") } diff --git a/src/runtime/synctest.go b/src/runtime/synctest.go index 529f69fd930..8fdf437acbb 100644 --- a/src/runtime/synctest.go +++ b/src/runtime/synctest.go @@ -52,7 +52,7 @@ func (bubble *synctestBubble) changegstatus(gp *g, oldval, newval uint32) { totalDelta := 0 wasRunning := true switch oldval { - case _Gdead: + case _Gdead, _Gdeadextra: wasRunning = false totalDelta++ case _Gwaiting: @@ -62,7 +62,7 @@ func (bubble *synctestBubble) changegstatus(gp *g, oldval, newval uint32) { } isRunning := true switch newval { - case _Gdead: + case _Gdead, _Gdeadextra: isRunning = false totalDelta-- if gp == bubble.main { @@ -86,7 +86,7 @@ func (bubble *synctestBubble) changegstatus(gp *g, oldval, newval uint32) { bubble.running++ } else { bubble.running-- - if raceenabled && newval != _Gdead { + if raceenabled && newval != _Gdead && newval != _Gdeadextra { // Record that this goroutine parking happens before // any subsequent Wait. 
racereleasemergeg(gp, bubble.raceaddr()) diff --git a/src/runtime/sys_darwin.go b/src/runtime/sys_darwin.go index aa628021a05..48ad5afd8a0 100644 --- a/src/runtime/sys_darwin.go +++ b/src/runtime/sys_darwin.go @@ -10,186 +10,65 @@ import ( "unsafe" ) -//go:nosplit -func libcError() uintptr { - errPtr, _ := syscall(abi.FuncPCABI0(libc_error_trampoline), 0, 0, 0) - return errPtr -} func libc_error_trampoline() -// The X versions of syscall expect the libc call to return a 64-bit result. -// Otherwise (the non-X version) expects a 32-bit result. -// This distinction is required because an error is indicated by returning -1, -// and we need to know whether to check 32 or 64 bits of the result. -// (Some libc functions that return 32 bits put junk in the upper 32 bits of AX.) - -//go:nosplit -func syscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr) { - args := struct{ fn, a1, a2, a3, r1, r2 uintptr }{fn, a1, a2, a3, r1, r2} - libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall_trampoline)), unsafe.Pointer(&args)) - return args.r1, args.r2 -} -func syscall_trampoline() - -// golang.org/x/sys linknames syscall_syscall -// (in addition to standard package syscall). -// Do not remove or change the type signature. +// libc_error_addr puts the libc error +// address into addr. // -//go:linkname syscall_syscall syscall.syscall //go:nosplit -func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { +//go:cgo_unsafe_args +func libc_error_addr(addr **int32) { + libcCall(unsafe.Pointer(abi.FuncPCABI0(libc_error_trampoline)), unsafe.Pointer(&addr)) +} + +// libcCallInfo is a structure used to pass parameters to the system call. +type libcCallInfo struct { + fn uintptr + n uintptr // number of parameters + args uintptr // parameters + r1, r2 uintptr // return values +} + +// syscall_syscalln is a wrapper around the libc call with variable arguments. +// +//go:linkname syscall_syscalln syscall.syscalln +//go:nosplit +//go:uintptrkeepalive +func syscall_syscalln(fn uintptr, args ...uintptr) (r1, r2, err uintptr) { entersyscall() - r1, r2, err = syscall_rawSyscall(fn, a1, a2, a3) + r1, r2, err = syscall_rawsyscalln(fn, args...) exitsyscall() return r1, r2, err } -//go:linkname syscall_syscallX syscall.syscallX -//go:nosplit -func syscall_syscallX(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { - entersyscall() - r1, r2, err = syscall_rawSyscallX(fn, a1, a2, a3) - exitsyscall() - return r1, r2, err -} - -//go:nosplit -func syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr) { - args := struct{ fn, a1, a2, a3, a4, a5, a6, r1, r2 uintptr }{fn, a1, a2, a3, a4, a5, a6, r1, r2} - libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6_trampoline)), unsafe.Pointer(&args)) - return args.r1, args.r2 -} -func syscall6_trampoline() - -// golang.org/x/sys linknames syscall.syscall6 -// (in addition to standard package syscall). -// Do not remove or change the type signature. +// syscall_rawsyscalln is a wrapper around the libc call with variable arguments. +// The scheduler is not notified about the system call. +// The syscall is executed on the current goroutine thread rather than on a +// dedicated syscall thread. // -// syscall.syscall6 is meant for package syscall (and x/sys), -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/tetratelabs/wazero -// -// See go.dev/issue/67401. 
-// -//go:linkname syscall_syscall6 syscall.syscall6 +//go:linkname syscall_rawsyscalln syscall.rawsyscalln //go:nosplit -func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { - entersyscall() - r1, r2, err = syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6) - exitsyscall() - return r1, r2, err -} - -//go:linkname syscall_syscall6X syscall.syscall6X -//go:nosplit -func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { - entersyscall() - r1, r2, err = syscall_rawSyscall6X(fn, a1, a2, a3, a4, a5, a6) - exitsyscall() - return r1, r2, err -} - -//go:nosplit -func syscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr) { - args := struct{ fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, r1, r2 uintptr }{fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, r1, r2} - libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall9_trampoline)), unsafe.Pointer(&args)) - return args.r1, args.r2 -} -func syscall9_trampoline() - -// golang.org/x/sys linknames syscall.syscall9 -// (in addition to standard package syscall). -// Do not remove or change the type signature. -// -//go:linkname syscall_syscall9 syscall.syscall9 -//go:nosplit -func syscall_syscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) { - entersyscall() - r1, r2, err = syscall_rawSyscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9) - exitsyscall() - return r1, r2, err -} - -// golang.org/x/sys linknames syscall.syscallPtr -// (in addition to standard package syscall). -// Do not remove or change the type signature. -// -//go:linkname syscall_syscallPtr syscall.syscallPtr -//go:nosplit -func syscall_syscallPtr(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { - entersyscall() - r1, r2, err = syscall_rawSyscallPtr(fn, a1, a2, a3) - exitsyscall() - return r1, r2, err -} - -// golang.org/x/sys linknames syscall_rawSyscall -// (in addition to standard package syscall). -// Do not remove or change the type signature. -// -//go:linkname syscall_rawSyscall syscall.rawSyscall -//go:nosplit -func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { - r1, r2 = syscall(fn, a1, a2, a3) - // Check if r1 low 32 bits is -1, indicating an error. - if int32(r1) == -1 { - err = libcError() +//go:uintptrkeepalive +func syscall_rawsyscalln(fn uintptr, args ...uintptr) (r1, r2, err uintptr) { + c := &libcCallInfo{ + fn: fn, + n: uintptr(len(args)), } - return r1, r2, err + if c.n != 0 { + c.args = uintptr(noescape(unsafe.Pointer(&args[0]))) + } + libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallN_trampoline)), unsafe.Pointer(c)) + if gp := getg(); gp != nil && gp.m != nil && gp.m.errnoAddr != nil { + err = uintptr(*gp.m.errnoAddr) + } else { + var errnoAddr *int32 + libc_error_addr(&errnoAddr) + err = uintptr(*errnoAddr) + } + return c.r1, c.r2, err } -//go:nosplit -func syscall_rawSyscallX(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { - r1, r2 = syscall(fn, a1, a2, a3) - if r1 == ^uintptr(0) { - err = libcError() - } - return r1, r2, err -} - -//go:nosplit -func syscall_rawSyscallPtr(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { - r1, r2 = syscall(fn, a1, a2, a3) - if r1 == 0 { - err = libcError() - } - return r1, r2, err -} - -// golang.org/x/sys linknames syscall_rawSyscall6 -// (in addition to standard package syscall). -// Do not remove or change the type signature. 
-// -//go:linkname syscall_rawSyscall6 syscall.rawSyscall6 -//go:nosplit -func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { - r1, r2 = syscall6(fn, a1, a2, a3, a4, a5, a6) - // Check if r1 low 32 bits is -1, indicating an error. - if int32(r1) == -1 { - err = libcError() - } - return r1, r2, err -} - -//go:nosplit -func syscall_rawSyscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { - r1, r2 = syscall6(fn, a1, a2, a3, a4, a5, a6) - if r1 == ^uintptr(0) { - err = libcError() - } - return r1, r2, err -} - -//go:nosplit -func syscall_rawSyscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) { - r1, r2 = syscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9) - // Check if r1 low 32 bits is -1, indicating an error. - if int32(r1) == -1 { - err = libcError() - } - return r1, r2, err -} +func syscallN_trampoline() // crypto_x509_syscall is used in crypto/x509/internal/macos to call into Security.framework and CF. diff --git a/src/runtime/sys_darwin_amd64.s b/src/runtime/sys_darwin_amd64.s index 32aa6572749..e4e1216d569 100644 --- a/src/runtime/sys_darwin_amd64.s +++ b/src/runtime/sys_darwin_amd64.s @@ -509,118 +509,73 @@ TEXT runtime·arc4random_buf_trampoline(SB),NOSPLIT,$0 CALL libc_arc4random_buf(SB) RET -// syscall_trampoline calls a function in libc on behalf of the syscall package. -// syscall_trampoline takes a pointer to a struct like: -// struct { -// fn uintptr -// a1 uintptr -// a2 uintptr -// a3 uintptr -// r1 uintptr -// r2 uintptr -// } -// syscall_trampoline must be called on the g0 stack with the -// C calling convention (use libcCall). -TEXT runtime·syscall_trampoline(SB),NOSPLIT,$16 - MOVQ (0*8)(DI), CX // fn - MOVQ (2*8)(DI), SI // a2 - MOVQ (3*8)(DI), DX // a3 - MOVQ DI, (SP) - MOVQ (1*8)(DI), DI // a1 - XORL AX, AX // vararg: say "no float args" - CALL CX +TEXT runtime·syscallN_trampoline(SB),NOSPLIT,$0 + // store argument and original SP in a callee-saved register + MOVQ DI, R13 + MOVQ SP, R14 - MOVQ (SP), DI - MOVQ AX, (4*8)(DI) // r1 - MOVQ DX, (5*8)(DI) // r2 + MOVQ libcCallInfo_fn(R13), R11 + MOVQ libcCallInfo_n(R13), CX + MOVQ libcCallInfo_args(R13), R10 - XORL AX, AX // no error (it's ignored anyway) - RET + // Fast version, do not store args on the stack. + CMPL CX, $0; JE _0args + CMPL CX, $1; JE _1args + CMPL CX, $2; JE _2args + CMPL CX, $3; JE _3args + CMPL CX, $4; JE _4args + CMPL CX, $5; JE _5args + CMPL CX, $6; JE _6args -// syscall6_trampoline calls a function in libc on behalf of the syscall package. -// syscall6_trampoline takes a pointer to a struct like: -// struct { -// fn uintptr -// a1 uintptr -// a2 uintptr -// a3 uintptr -// a4 uintptr -// a5 uintptr -// a6 uintptr -// r1 uintptr -// r2 uintptr -// } -// syscall6_trampoline must be called on the g0 stack with the -// C calling convention (use libcCall). -TEXT runtime·syscall6_trampoline(SB),NOSPLIT,$16 - MOVQ (0*8)(DI), R11// fn - MOVQ (2*8)(DI), SI // a2 - MOVQ (3*8)(DI), DX // a3 - MOVQ (4*8)(DI), CX // a4 - MOVQ (5*8)(DI), R8 // a5 - MOVQ (6*8)(DI), R9 // a6 - MOVQ DI, (SP) - MOVQ (1*8)(DI), DI // a1 - XORL AX, AX // vararg: say "no float args" + // Reserve stack space for remaining args + MOVQ CX, R12 + SUBQ $6, R12 + ADDQ $1, R12 // make even number of words for stack alignment + ANDQ $~1, R12 + SHLQ $3, R12 + SUBQ R12, SP + + // Copy args to the stack. 
+ // CX: count of stack arguments (n-6) + // SI: &args[6] + // DI: copy of RSP + SUBQ $6, CX + MOVQ R10, SI + ADDQ $(8*6), SI + MOVQ SP, DI + CLD + REP; MOVSQ + +_6args: + MOVQ (5*8)(R10), R9 +_5args: + MOVQ (4*8)(R10), R8 +_4args: + MOVQ (3*8)(R10), CX +_3args: + MOVQ (2*8)(R10), DX +_2args: + MOVQ (1*8)(R10), SI +_1args: + MOVQ (0*8)(R10), DI +_0args: + + XORL AX, AX // vararg: say "no float args" CALL R11 - MOVQ (SP), DI - MOVQ AX, (7*8)(DI) // r1 - MOVQ DX, (8*8)(DI) // r2 + MOVQ R14, SP // free stack space - XORL AX, AX // no error (it's ignored anyway) - RET - -// syscall9_trampoline calls a function in libc on behalf of the syscall package. -// syscall9_trampoline takes a pointer to a struct like: -// struct { -// fn uintptr -// a1 uintptr -// a2 uintptr -// a3 uintptr -// a4 uintptr -// a5 uintptr -// a6 uintptr -// a7 uintptr -// a8 uintptr -// a9 uintptr -// r1 uintptr -// r2 uintptr -// err uintptr -// } -// syscall9_trampoline must be called on the g0 stack with the -// C calling convention (use libcCall). -TEXT runtime·syscall9_trampoline(SB),NOSPLIT,$32 - MOVQ (0*8)(DI), R13// fn - MOVQ (2*8)(DI), SI // a2 - MOVQ (3*8)(DI), DX // a3 - MOVQ (4*8)(DI), CX // a4 - MOVQ (5*8)(DI), R8 // a5 - MOVQ (6*8)(DI), R9 // a6 - MOVQ (7*8)(DI), R10 // a7 - MOVQ R10, 0(SP) - MOVQ (8*8)(DI), R11 // a8 - MOVQ R11, 8(SP) - MOVQ (9*8)(DI), R12 // a9 - MOVQ R12, 16(SP) - MOVQ DI, 24(SP) - MOVQ (1*8)(DI), DI // a1 - XORL AX, AX // vararg: say "no float args" - - CALL R13 - - MOVQ 24(SP), DI - MOVQ AX, (10*8)(DI) // r1 - MOVQ DX, (11*8)(DI) // r2 - - XORL AX, AX // no error (it's ignored anyway) + // Return result. + MOVQ AX, libcCallInfo_r1(R13) + MOVQ DX, libcCallInfo_r2(R13) RET TEXT runtime·libc_error_trampoline(SB),NOSPLIT,$0 + MOVQ 0(DI), R14 CALL libc_error(SB) - MOVLQSX (AX), AX + MOVQ AX, (R14) RET // syscall_x509 is for crypto/x509. It is like syscall6 but does not check for errors, diff --git a/src/runtime/sys_darwin_arm64.s b/src/runtime/sys_darwin_arm64.s index adbb2adafe9..7bbe965c158 100644 --- a/src/runtime/sys_darwin_arm64.s +++ b/src/runtime/sys_darwin_arm64.s @@ -481,128 +481,98 @@ TEXT runtime·arc4random_buf_trampoline(SB),NOSPLIT,$0 BL libc_arc4random_buf(SB) RET -// syscall_trampoline calls a function in libc on behalf of the syscall package. -// syscall_trampoline takes a pointer to a struct like: -// struct { -// fn uintptr -// a1 uintptr -// a2 uintptr -// a3 uintptr -// r1 uintptr -// r2 uintptr -// } -// syscall_trampoline must be called on the g0 stack with the -// C calling convention (use libcCall). -TEXT runtime·syscall_trampoline(SB),NOSPLIT,$0 - SUB $16, RSP // push structure pointer - MOVD R0, 8(RSP) +TEXT runtime·syscallN_trampoline(SB),NOSPLIT,$16 + STP (R19, R20), 16(RSP) // save old R19, R20 + MOVD R0, R19 // save struct pointer + MOVD RSP, R20 // save stack pointer + SUB $16, RSP // reserve 16 bytes for sp-8 where fp may be saved. - MOVD 0(R0), R12 // fn - MOVD 16(R0), R1 // a2 - MOVD 24(R0), R2 // a3 - MOVD 8(R0), R0 // a1 + MOVD libcCallInfo_args(R19), R12 + // Do we have more than 8 arguments? 
+ MOVD libcCallInfo_n(R19), R0 + CMP $0, R0; BEQ _0args + CMP $1, R0; BEQ _1args + CMP $2, R0; BEQ _2args + CMP $3, R0; BEQ _3args + CMP $4, R0; BEQ _4args + CMP $5, R0; BEQ _5args + CMP $6, R0; BEQ _6args + CMP $7, R0; BEQ _7args + CMP $8, R0; BEQ _8args + + // Reserve stack space for remaining args + SUB $8, R0, R2 + ADD $1, R2, R3 // make even number of words for stack alignment + AND $~1, R3 + LSL $3, R3 + SUB R3, RSP + + // R4: size of stack arguments (n-8)*8 + // R5: &args[8] + // R6: loop counter, from 0 to (n-8)*8 + // R7: scratch + // R8: copy of RSP - (R2)(RSP) assembles as (R2)(ZR) + SUB $8, R0, R4 + LSL $3, R4 + ADD $(8*8), R12, R5 + MOVD $0, R6 + MOVD RSP, R8 +stackargs: + MOVD (R6)(R5), R7 + MOVD R7, (R6)(R8) + ADD $8, R6 + CMP R6, R4 + BNE stackargs + +_8args: + MOVD (7*8)(R12), R7 +_7args: + MOVD (6*8)(R12), R6 +_6args: + MOVD (5*8)(R12), R5 +_5args: + MOVD (4*8)(R12), R4 +_4args: + MOVD (3*8)(R12), R3 +_3args: + MOVD (2*8)(R12), R2 +_2args: + MOVD (1*8)(R12), R1 +_1args: + MOVD (0*8)(R12), R0 +_0args: // If fn is declared as vararg, we have to pass the vararg arguments on the stack. // (Because ios decided not to adhere to the standard arm64 calling convention, sigh...) - // The only libSystem calls we support that are vararg are open, fcntl, and ioctl, - // which are all of the form fn(x, y, ...). So we just need to put the 3rd arg - // on the stack as well. + // The only libSystem calls we support with vararg are open, fcntl, ioctl, + // which are all of the form fn(x, y, ...), and openat, which is of the form fn(x, y, z, ...). + // So we just need to put the 3rd and the 4th arg on the stack as well. + // Note that historically openat has been called with syscall6, so we need to handle that case too. // If we ever have other vararg libSystem calls, we might need to handle more cases. + MOVD libcCallInfo_n(R19), R12 + CMP $3, R12; BNE 2(PC); MOVD R2, (RSP) - - BL (R12) - - MOVD 8(RSP), R2 // pop structure pointer - ADD $16, RSP - MOVD R0, 32(R2) // save r1 - MOVD R1, 40(R2) // save r2 - RET - -// syscall6_trampoline calls a function in libc on behalf of the syscall package. -// syscall6_trampoline takes a pointer to a struct like: -// struct { -// fn uintptr -// a1 uintptr -// a2 uintptr -// a3 uintptr -// a4 uintptr -// a5 uintptr -// a6 uintptr -// r1 uintptr -// r2 uintptr -// } -// syscall6_trampoline must be called on the g0 stack with the -// C calling convention (use libcCall). -TEXT runtime·syscall6_trampoline(SB),NOSPLIT,$0 - SUB $16, RSP // push structure pointer - MOVD R0, 8(RSP) - - MOVD 0(R0), R12 // fn - MOVD 16(R0), R1 // a2 - MOVD 24(R0), R2 // a3 - MOVD 32(R0), R3 // a4 - MOVD 40(R0), R4 // a5 - MOVD 48(R0), R5 // a6 - MOVD 8(R0), R0 // a1 - - // If fn is declared as vararg, we have to pass the vararg arguments on the stack. - // See syscall_trampoline above. The only function this applies to is openat, for which the 4th - // arg must be on the stack. + CMP $4, R12; BNE 2(PC); + MOVD R3, (RSP) + CMP $6, R12; BNE 2(PC); MOVD R3, (RSP) + MOVD libcCallInfo_fn(R19), R12 BL (R12) - MOVD 8(RSP), R2 // pop structure pointer - ADD $16, RSP - MOVD R0, 56(R2) // save r1 - MOVD R1, 64(R2) // save r2 - RET + MOVD R20, RSP // free stack space -// syscall9_trampoline calls a function in libc on behalf of the syscall package. 
-// syscall9_trampoline takes a pointer to a struct like: -// struct { -// fn uintptr -// a1 uintptr -// a2 uintptr -// a3 uintptr -// a4 uintptr -// a5 uintptr -// a6 uintptr -// a7 uintptr -// a8 uintptr -// a9 uintptr -// r1 uintptr -// r2 uintptr -// } -// syscall9_trampoline must be called on the g0 stack with the -// C calling convention (use libcCall). -TEXT runtime·syscall9_trampoline(SB),NOSPLIT,$0 - SUB $16, RSP // push structure pointer - MOVD R0, 8(RSP) + MOVD R0, libcCallInfo_r1(R19) + MOVD R1, libcCallInfo_r2(R19) - MOVD 0(R0), R12 // fn - MOVD 16(R0), R1 // a2 - MOVD 24(R0), R2 // a3 - MOVD 32(R0), R3 // a4 - MOVD 40(R0), R4 // a5 - MOVD 48(R0), R5 // a6 - MOVD 56(R0), R6 // a7 - MOVD 64(R0), R7 // a8 - MOVD 72(R0), R8 // a9 - MOVD R8, 0(RSP) // the 9th arg and onwards must be passed on the stack - MOVD 8(R0), R0 // a1 - - BL (R12) - - MOVD 8(RSP), R2 // pop structure pointer - ADD $16, RSP - MOVD R0, 80(R2) // save r1 - MOVD R1, 88(R2) // save r2 - RET + // Restore callee-saved registers. + LDP 16(RSP), (R19, R20) + RET TEXT runtime·libc_error_trampoline(SB),NOSPLIT,$0 + MOVD 0(R0), R20 BL libc_error(SB) - MOVW (R0), R0 + MOVD R0, (R20) RET // syscall_x509 is for crypto/x509. It is like syscall6 but does not check for errors, diff --git a/src/runtime/sys_linux_386.s b/src/runtime/sys_linux_386.s index 1c3f1ff3e6e..f664d8ace7f 100644 --- a/src/runtime/sys_linux_386.s +++ b/src/runtime/sys_linux_386.s @@ -54,6 +54,7 @@ #define SYS_exit_group 252 #define SYS_timer_create 259 #define SYS_timer_settime 260 +#define SYS_timer_settime64 409 #define SYS_timer_delete 263 #define SYS_clock_gettime 265 #define SYS_tgkill 270 @@ -209,7 +210,8 @@ TEXT runtime·timer_create(SB),NOSPLIT,$0-16 MOVL AX, ret+12(FP) RET -TEXT runtime·timer_settime(SB),NOSPLIT,$0-20 +// Linux: kernel/time/posix-timer.c, requiring COMPAT_32BIT_TIME +TEXT runtime·timer_settime32(SB),NOSPLIT,$0-20 MOVL $SYS_timer_settime, AX MOVL timerid+0(FP), BX MOVL flags+4(FP), CX @@ -219,6 +221,16 @@ TEXT runtime·timer_settime(SB),NOSPLIT,$0-20 MOVL AX, ret+16(FP) RET +TEXT runtime·timer_settime64(SB),NOSPLIT,$0-20 + MOVL $SYS_timer_settime64, AX + MOVL timerid+0(FP), BX + MOVL flags+4(FP), CX + MOVL new+8(FP), DX + MOVL old+12(FP), SI + INVOKE_SYSCALL + MOVL AX, ret+16(FP) + RET + TEXT runtime·timer_delete(SB),NOSPLIT,$0-8 MOVL $SYS_timer_delete, AX MOVL timerid+0(FP), BX diff --git a/src/runtime/sys_linux_arm.s b/src/runtime/sys_linux_arm.s index 44b56ccb9f0..f19e293d0fc 100644 --- a/src/runtime/sys_linux_arm.s +++ b/src/runtime/sys_linux_arm.s @@ -44,6 +44,7 @@ #define SYS_clock_gettime (SYS_BASE + 263) #define SYS_timer_create (SYS_BASE + 257) #define SYS_timer_settime (SYS_BASE + 258) +#define SYS_timer_settime64 (SYS_BASE + 409) #define SYS_timer_delete (SYS_BASE + 261) #define SYS_pipe2 (SYS_BASE + 359) #define SYS_access (SYS_BASE + 33) @@ -231,8 +232,8 @@ TEXT runtime·timer_create(SB),NOSPLIT,$0-16 SWI $0 MOVW R0, ret+12(FP) RET - -TEXT runtime·timer_settime(SB),NOSPLIT,$0-20 +// Linux: kernel/time/posix-timer.c, requiring COMPAT_32BIT_TIME. 
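These hunks add only the raw syscall wrappers; the Go-side choice between timer_settime64 and the COMPAT_32BIT_TIME variant is not shown in this patch. A heavily hedged sketch of the usual dispatch shape on 32-bit targets (all names, the errno handling, and the fallback latch are illustrative assumptions, not code from this CL):

```go
package main

import "unsafe"

const _ENOSYS = 38 // Linux errno for a syscall the kernel does not implement

// Stubs standing in for the assembly wrappers added above; the real ones
// live in sys_linux_*.s and return -errno on failure.
func timer_settime32(timerid, flags int32, new, old unsafe.Pointer) int32 { return 0 }
func timer_settime64(timerid, flags int32, new, old unsafe.Pointer) int32 { return 0 }

var settime64Missing bool

// timerSettime is an assumed dispatch shape: prefer the 64-bit syscall and
// latch an ENOSYS so old kernels fall back to the COMPAT_32BIT_TIME variant.
// A real fallback must also convert between the 32- and 64-bit itimerspec
// layouts, which this sketch glosses over.
func timerSettime(timerid, flags int32, new, old unsafe.Pointer) int32 {
	if !settime64Missing {
		if r := timer_settime64(timerid, flags, new, old); r != -_ENOSYS {
			return r
		}
		settime64Missing = true // kernel predates timer_settime64
	}
	return timer_settime32(timerid, flags, new, old)
}

func main() {}
```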
+TEXT runtime·timer_settime32(SB),NOSPLIT,$0-20 MOVW timerid+0(FP), R0 MOVW flags+4(FP), R1 MOVW new+8(FP), R2 @@ -242,6 +243,16 @@ TEXT runtime·timer_settime(SB),NOSPLIT,$0-20 MOVW R0, ret+16(FP) RET +TEXT runtime·timer_settime64(SB),NOSPLIT,$0-20 + MOVW timerid+0(FP), R0 + MOVW flags+4(FP), R1 + MOVW new+8(FP), R2 + MOVW old+12(FP), R3 + MOVW $SYS_timer_settime64, R7 + SWI $0 + MOVW R0, ret+16(FP) + RET + TEXT runtime·timer_delete(SB),NOSPLIT,$0-8 MOVW timerid+0(FP), R0 MOVW $SYS_timer_delete, R7 diff --git a/src/runtime/sys_linux_mipsx.s b/src/runtime/sys_linux_mipsx.s index 6f11841efc7..bc3f84fbb95 100644 --- a/src/runtime/sys_linux_mipsx.s +++ b/src/runtime/sys_linux_mipsx.s @@ -39,6 +39,7 @@ #define SYS_exit_group 4246 #define SYS_timer_create 4257 #define SYS_timer_settime 4258 +#define SYS_timer_settime64 4409 #define SYS_timer_delete 4261 #define SYS_clock_gettime 4263 #define SYS_tgkill 4266 @@ -197,7 +198,8 @@ TEXT runtime·timer_create(SB),NOSPLIT,$0-16 MOVW R2, ret+12(FP) RET -TEXT runtime·timer_settime(SB),NOSPLIT,$0-20 +// Linux: kernel/time/posix-timer.c, requiring COMPAT_32BIT_TIME +TEXT runtime·timer_settime32(SB),NOSPLIT,$0-20 MOVW timerid+0(FP), R4 MOVW flags+4(FP), R5 MOVW new+8(FP), R6 @@ -207,6 +209,16 @@ TEXT runtime·timer_settime(SB),NOSPLIT,$0-20 MOVW R2, ret+16(FP) RET +TEXT runtime·timer_settime64(SB),NOSPLIT,$0-20 + MOVW timerid+0(FP), R4 + MOVW flags+4(FP), R5 + MOVW new+8(FP), R6 + MOVW old+12(FP), R7 + MOVW $SYS_timer_settime64, R2 + SYSCALL + MOVW R2, ret+16(FP) + RET + TEXT runtime·timer_delete(SB),NOSPLIT,$0-8 MOVW timerid+0(FP), R4 MOVW $SYS_timer_delete, R2 diff --git a/src/runtime/tagptr_32bit.go b/src/runtime/tagptr_32bit.go index d846904130d..b2175df9130 100644 --- a/src/runtime/tagptr_32bit.go +++ b/src/runtime/tagptr_32bit.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build 386 || arm || mips || mipsle +//go:build 386 || arm || mips || mipsle || (gccgo && (ppc || s390)) package runtime diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/README.md b/src/runtime/testdata/testgoroutineleakprofile/goker/README.md index 88c50e1e480..e6f8fe23f26 100644 --- a/src/runtime/testdata/testgoroutineleakprofile/goker/README.md +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/README.md @@ -24,18 +24,22 @@ Jingling Xue (jingling@cse.unsw.edu.au): White paper: https://lujie.ac.cn/files/papers/GoBench.pdf -The examples have been modified in order to run the goroutine leak -profiler. Buggy snippets are moved from within a unit test to separate -applications. Each is then independently executed, possibly as multiple -copies within the same application in order to exercise more interleavings. -Concurrently, the main program sets up a waiting period (typically 1ms), followed -by a goroutine leak profile request. Other modifications may involve injecting calls -to `runtime.Gosched()`, to more reliably exercise buggy interleavings, or reductions -in waiting periods when calling `time.Sleep`, in order to reduce overall testing time. +The examples have been modified in order to run the goroutine leak profiler. +Buggy snippets are moved from within a unit test to separate applications. +Each is then independently executed, possibly as multiple copies within the +same application in order to exercise more interleavings. Concurrently, the +main program sets up a waiting period (typically 1ms), followed by a goroutine +leak profile request. 
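As a concrete picture of the harness shape described above (the profile name "goroutineleak" is an assumption based on the testdata directory name; it is not shown in this patch):

```go
// Hypothetical standalone reproduction of the harness pattern: run the
// buggy snippet several times, wait briefly, then request a goroutine
// leak profile. The profile name "goroutineleak" is an assumption.
package main

import (
	"os"
	"runtime/pprof"
	"time"
)

func buggySnippet() {
	ch := make(chan int)
	go func() { <-ch }() // leaks: nothing ever sends on ch
}

func main() {
	for i := 0; i < 8; i++ { // multiple copies to exercise interleavings
		buggySnippet()
	}
	time.Sleep(1 * time.Millisecond) // waiting period
	if p := pprof.Lookup("goroutineleak"); p != nil {
		p.WriteTo(os.Stdout, 1)
	}
}
```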
Other modifications may involve injecting calls to
+`runtime.Gosched()`, to more reliably exercise buggy interleavings, or reductions
+in waiting periods when calling `time.Sleep`, in order to reduce overall testing
+time.
 
-The resulting goroutine leak profile is analyzed to ensure that no unexpected leaks occurred,
-and that the expected leaks did occur. If the leak is flaky, the only purpose of the expected
-leak list is to protect against unexpected leaks.
+The resulting goroutine leak profile is analyzed to ensure that no unexpected
+leaks occurred, and that the expected leaks did occur. If the leak is flaky, the
+only purpose of the expected leak list is to protect against unexpected leaks.
+
+The examples have also been modified to remove data races, since those create flaky
+test failures when all we really care about is leaked goroutines.
 
 The entries below document each of the corresponding leaks.
 
@@ -1844,4 +1848,4 @@
 c.inbox <- <================> [<-c.inbox] .
 close(c.closed) .
 <-c.dispatcherLoopStopped
 ---------------------G1,G2 leak-------------------------------
-```
\ No newline at end of file
+```
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1055.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1055.go
index 687baed25a2..87cf1579968 100644
--- a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1055.go
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1055.go
@@ -44,9 +44,9 @@ func (s *Stopper_cockroach1055) SetStopped() {
 func (s *Stopper_cockroach1055) Quiesce() {
 	s.mu.Lock()
 	defer s.mu.Unlock()
-	s.draining = 1
+	atomic.StoreInt32(&s.draining, 1)
 	s.drain.Wait()
-	s.draining = 0
+	atomic.StoreInt32(&s.draining, 0)
 }
 
 func (s *Stopper_cockroach1055) Stop() {
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby27782.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby27782.go
index 7b3398fd381..9b53d9035ca 100644
--- a/src/runtime/testdata/testgoroutineleakprofile/goker/moby27782.go
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby27782.go
@@ -208,6 +208,7 @@ func (container *Container_moby27782) Reset() {
 }
 
 type JSONFileLogger_moby27782 struct {
+	mu      sync.Mutex
 	readers map[*LogWatcher_moby27782]struct{}
 }
 
@@ -218,11 +219,17 @@ func (l *JSONFileLogger_moby27782) ReadLogs() *LogWatcher_moby27782 {
 }
 
 func (l *JSONFileLogger_moby27782) readLogs(logWatcher *LogWatcher_moby27782) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
 	l.readers[logWatcher] = struct{}{}
 	followLogs_moby27782(logWatcher)
 }
 
 func (l *JSONFileLogger_moby27782) Close() {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
 	for r := range l.readers {
 		r.Close()
 		delete(l.readers, r)
diff --git a/src/runtime/testdata/testprog/panicprint.go b/src/runtime/testdata/testprog/panicprint.go
index 4ce958ba3db..eaf3ba2c337 100644
--- a/src/runtime/testdata/testprog/panicprint.go
+++ b/src/runtime/testdata/testprog/panicprint.go
@@ -4,6 +4,8 @@
 
 package main
 
+import "sync"
+
 type MyBool bool
 type MyComplex128 complex128
 type MyComplex64 complex64
@@ -90,6 +92,23 @@ func panicCustomFloat32() {
 	panic(MyFloat32(-93.70))
 }
 
+func panicDeferFatal() {
+	var mu sync.Mutex
+	defer mu.Unlock()
+	var i *int
+	*i = 0
+}
+
+func panicDoubleDeferFatal() {
+	var mu sync.Mutex
+	defer mu.Unlock()
+	defer func() {
+		panic(recover())
+	}()
+	var i *int
+	*i = 0
+}
+
 func init() {
 	register("panicCustomComplex64", panicCustomComplex64)
 	register("panicCustomComplex128", panicCustomComplex128)
@@ -108,4 +127,6 @@ func init() {
 	register("panicCustomUint32",
panicCustomUint32)
 	register("panicCustomUint64", panicCustomUint64)
 	register("panicCustomUintptr", panicCustomUintptr)
+	register("panicDeferFatal", panicDeferFatal)
+	register("panicDoubleDeferFatal", panicDoubleDeferFatal)
 }
diff --git a/src/runtime/testdata/testprog/schedmetrics.go b/src/runtime/testdata/testprog/schedmetrics.go
index 6d3f68a848e..bc0906330f1 100644
--- a/src/runtime/testdata/testprog/schedmetrics.go
+++ b/src/runtime/testdata/testprog/schedmetrics.go
@@ -84,7 +84,12 @@ func SchedMetrics() {
 
 	// threadsSlack is the maximum number of threads left over
 	// from the runtime (sysmon, the template thread, etc.)
-	const threadsSlack = 4
+	// Certain build modes may also cause the creation of additional
+	// threads through frequent scheduling, like mayMoreStackPreempt.
+	// A slack of 5 is arbitrary but appears to be enough to cover
+	// the leftovers plus any inflation from scheduling-heavy build
+	// modes.
+	const threadsSlack = 5
 
 	// Make sure GC isn't running, since GC workers interfere with
 	// expected counts.
diff --git a/src/runtime/testdata/testprog/stw_mexit.go b/src/runtime/testdata/testprog/stw_mexit.go
new file mode 100644
index 00000000000..b022ef47775
--- /dev/null
+++ b/src/runtime/testdata/testprog/stw_mexit.go
@@ -0,0 +1,69 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"runtime"
+)
+
+func init() {
+	register("mexitSTW", mexitSTW)
+}
+
+// Stress test for pp.oldm pointing to an exited M.
+//
+// If pp.oldm points to an exited M it should be ignored and another M used
+// instead. To stress:
+//
+// 1. Start and exit many threads (thus setting oldm on some P).
+// 2. Meanwhile, frequently stop the world.
+//
+// If procresize incorrectly attempts to assign a P to an exited M, likely
+// failure modes are:
+//
+// 1. Crash in startTheWorldWithSema attempting to access the M, if it is nil.
+//
+// 2. Memory corruption elsewhere after startTheWorldWithSema writes to the M,
+// if it is not nil, but is freed and reused for another allocation.
+//
+// 3. Hang on a subsequent stop the world waiting for the P to stop, if the M
+// object is valid, but the M is exited, because startTheWorldWithSema didn't
+// actually wake anything to run the P. The P is _Pidle, but not in the pidle
+// list, thus stopTheWorldWithSema will wait for it to actively stop.
+//
+// For this to go wrong, an exited M must fail to clear mp.self and must leave
+// the M on the sched.midle list.
+//
+// Similar to TraceSTW.
+func mexitSTW() {
+	// Ensure we have multiple Ps, but not too many, as we want the
+	// runnable goroutines likely to run on Ps with oldm set.
+	runtime.GOMAXPROCS(4)
+
+	// Background busy work so there is always something runnable.
+	for i := range 2 {
+		go traceSTWTarget(i)
+	}
+
+	// Wait for children to start running.
+	ping.Store(1)
+	for pong[0].Load() != 1 {}
+	for pong[1].Load() != 1 {}
+
+	for range 100 {
+		// Exit a thread. The last P to run this will have it in oldm.
+		go func() {
+			runtime.LockOSThread()
+		}()
+
+		// STW
+		var ms runtime.MemStats
+		runtime.ReadMemStats(&ms)
+	}
+
+	stop.Store(true)
+
+	println("OK")
+}
diff --git a/src/runtime/testdata/testprog/stw_trace.go b/src/runtime/testdata/testprog/stw_trace.go
new file mode 100644
index 00000000000..0fed55b8757
--- /dev/null
+++ b/src/runtime/testdata/testprog/stw_trace.go
@@ -0,0 +1,99 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "context" + "log" + "os" + "runtime" + "runtime/debug" + "runtime/trace" + "sync/atomic" +) + +func init() { + register("TraceSTW", TraceSTW) +} + +// The parent writes to ping and waits for the children to write back +// via pong to show that they are running. +var ping atomic.Uint32 +var pong [2]atomic.Uint32 + +// Tell runners to stop. +var stop atomic.Bool + +func traceSTWTarget(i int) { + for !stop.Load() { + // Async preemption often takes 100ms+ to preempt this loop on + // windows-386. This makes the test flaky, as the traceReadCPU + // timer often fires by the time STW finishes, jumbling the + // goroutine scheduling. As a workaround, ensure we have a + // morestack call for prompt preemption. + ensureMorestack() + + pong[i].Store(ping.Load()) + } +} + +func TraceSTW() { + ctx := context.Background() + + // The idea here is to have 2 target goroutines that are constantly + // running. When the world restarts after STW, we expect these + // goroutines to continue execution on the same M and P. + // + // Set GOMAXPROCS=4 to make room for the 2 target goroutines, 1 parent, + // and 1 slack for potential misscheduling. + // + // Disable the GC because GC STW generally moves goroutines (see + // https://go.dev/issue/65694). Alternatively, we could just ignore the + // trace if the GC runs. + runtime.GOMAXPROCS(4) + debug.SetGCPercent(0) + + if err := trace.Start(os.Stdout); err != nil { + log.Fatalf("failed to start tracing: %v", err) + } + defer trace.Stop() + + for i := range 2 { + go traceSTWTarget(i) + } + + // Wait for children to start running. + ping.Store(1) + for pong[0].Load() != 1 {} + for pong[1].Load() != 1 {} + + trace.Log(ctx, "TraceSTW", "start") + + // STW + var ms runtime.MemStats + runtime.ReadMemStats(&ms) + + // Make sure to run long enough for the children to schedule again + // after STW. + ping.Store(2) + for pong[0].Load() != 2 {} + for pong[1].Load() != 2 {} + + trace.Log(ctx, "TraceSTW", "end") + + stop.Store(true) +} + +// Manually insert a morestack call. Leaf functions can omit morestack, but +// non-leaf functions should include them. + +//go:noinline +func ensureMorestack() { + ensureMorestack1() +} + +//go:noinline +func ensureMorestack1() { +} diff --git a/src/runtime/time.go b/src/runtime/time.go index e9d1f0b6c9a..81a4c6b79f1 100644 --- a/src/runtime/time.go +++ b/src/runtime/time.go @@ -636,6 +636,9 @@ func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay in } if t.state&timerHeaped == 0 && when <= bubble.now { systemstack(func() { + if !async && t.isChan { + unlock(&t.sendLock) + } t.unlockAndRun(bubble.now, bubble) }) return pending diff --git a/src/runtime/trace.go b/src/runtime/trace.go index 2c712469ea6..7130e2c1362 100644 --- a/src/runtime/trace.go +++ b/src/runtime/trace.go @@ -2,20 +2,182 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Go execution tracer. +// # Go execution tracer +// // The tracer captures a wide range of execution events like goroutine // creation/blocking/unblocking, syscall enter/exit/block, GC-related events, // changes of heap size, processor start/stop, etc and writes them to a buffer // in a compact form. A precise nanosecond-precision timestamp and a stack // trace is captured for most events. 
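The rewritten doc comment that follows centers on per-M buffers guarded by a seqlock and a global generation counter. As a companion to that description, here is an illustrative reduction of the acquire/release discipline (a sketch only; the real traceAcquire/traceRelease in traceruntime.go also handle reentrancy, syscall-exit accounting, and per-experiment buffers):

```go
// Illustrative sketch only; see traceruntime.go for the real logic.
package main

import "sync/atomic"

var traceGen atomic.Uintptr // current generation; 0 means tracing is off

type mSketch struct {
	seqlock atomic.Uintptr // odd while this M may be writing trace data
	buf     [2][]byte      // buffer slots, indexed by generation % 2
}

// traceAcquireSketch flips the seqlock to odd, then reads the generation
// under it. traceAdvance publishes a new generation and waits for every
// M's seqlock to go even before flushing that M's old-generation buffer.
func traceAcquireSketch(mp *mSketch) (gen uintptr, ok bool) {
	mp.seqlock.Add(1) // odd: traceAdvance must wait for us
	gen = traceGen.Load()
	if gen == 0 {
		mp.seqlock.Add(1) // even again: tracing is off
		return 0, false
	}
	return gen, true
}

func traceReleaseSketch(mp *mSketch) {
	mp.seqlock.Add(1) // even: our buffers are safe to flush
}

func main() {
	traceGen.Store(1) // pretend StartTrace published generation 1
	var m mSketch
	if gen, ok := traceAcquireSketch(&m); ok {
		_ = m.buf[gen%2] // events would be written into this slot
		traceReleaseSketch(&m)
	}
}
```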
 //
-// Tracer invariants (to keep the synchronization making sense):
-// - An m that has a trace buffer must be on either the allm or sched.freem lists.
-// - Any trace buffer mutation must either be happening in traceAdvance or between
-//   a traceAcquire and a subsequent traceRelease.
-// - traceAdvance cannot return until the previous generation's buffers are all flushed.
+// ## Design
 //
-// See https://go.dev/issue/60773 for a link to the full design.
+// The basic idea behind the execution tracer is to have per-M buffers that
+// trace data may be written into. Each M maintains a seqlock indicating whether
+// its trace buffer is currently in use.
+//
+// Tracing is initiated by StartTrace, and proceeds in "generations," with each
+// generation boundary marked by a call to traceAdvance, which advances to the
+// next generation. Generations are a global synchronization point for trace data,
+// and we proceed to a new generation by moving trace.gen forward. Each M reads
+// trace.gen under its own seqlock to determine which generation it is writing
+// trace data for. To this end, each M has 2 slots for buffers: one slot for the
+// previous generation, one slot for the current one. It uses tl.gen to select
+// which buffer slot to write to. Simultaneously, traceAdvance uses the seqlock
+// to determine whether every thread is guaranteed to observe an updated
+// trace.gen. Once it is sure, it may then safely flush any buffers that are left
+// over from the previous generation, since it knows the Ms will not mutate them.
+//
+// Flushed buffers are processed by the ReadTrace function, which is called by
+// the trace reader goroutine. The first goroutine to call ReadTrace is designated
+// as the trace reader goroutine until tracing completes. (There may only be one at
+// a time.)
+//
+// Once all buffers are flushed, any extra post-processing is complete, and the
+// flushed buffers have been processed by the trace reader goroutine, the trace
+// emits an EndOfGeneration event to mark the global synchronization point in the
+// trace.
+//
+// All other trace features, including CPU profile samples, stack information,
+// and string tables, revolve around this generation system, and typically
+// appear in pairs: one for the previous generation, and one for the current one.
+// Like the per-M buffers, which of the two is written to is selected using trace.gen,
+// and anything managed this way must similarly be mutated only in traceAdvance or
+// under the M's seqlock.
+//
+// Trace events themselves are simple. They consist of a single byte for the event type,
+// followed by zero or more LEB128-encoded unsigned varints. They are decoded using
+// a pre-determined table for each trace version: internal/trace/tracev2.specs.
+// (A sketch of this event layout follows below.)
+//
+// To avoid relying on timestamps for correctness and validation, each G and P has
+// sequence counters that are written into trace events to encode a partial order.
+// The sequence counters reset on each generation. Ms do not need sequence counters
+// because they are the source of truth for execution: trace events, and even whole
+// buffers, are guaranteed to appear in order in the trace data stream, simply because
+// that's the order the thread emitted them in.
+//
+// See traceruntime.go for the API the tracer exposes to the runtime for emitting events.
+//
+// In each generation, we ensure that we enumerate all goroutines, such that each
+// generation's data is fully self-contained. This makes features like the flight
+// recorder easy to implement.
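To make the wire format above concrete, here is a minimal sketch using the standard library's unsigned varint encoder, which produces LEB128 for unsigned values; the event type and arguments are invented for illustration and are not from tracev2.specs:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// appendEvent sketches the layout described above: a one-byte event type
// followed by zero or more LEB128-encoded unsigned varint arguments.
func appendEvent(buf []byte, evType byte, args ...uint64) []byte {
	buf = append(buf, evType)
	for _, a := range args {
		buf = binary.AppendUvarint(buf, a) // LEB128 unsigned varint
	}
	return buf
}

func main() {
	// Hypothetical event 0x14 carrying a goroutine ID and a sequence number.
	evt := appendEvent(nil, 0x14, 42, 7)
	fmt.Printf("% x\n", evt) // 14 2a 07
}
```

The real decoder drives this in reverse, using the per-version table in internal/trace/tracev2.specs to know how many arguments follow each event byte.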
To this end, we guarantee that every live goroutine is
+// listed at least once by emitting a status event for the goroutine, indicating its
+// starting state. These status events are emitted based on context, generally the
+// event that's about to be emitted.
+//
+// The traceEventWriter type encapsulates these details, and is the backbone of
+// the API exposed in traceruntime.go, though there are deviations where necessary.
+//
+// This is the overall design, but as always, there are many details. Beyond this,
+// look to the invariants and select corner cases below and the code itself for the
+// source of truth.
+//
+// See https://go.dev/issue/60773 for a link to a more complete design with rationale,
+// though parts of it are out-of-date.
+//
+// ## Invariants
+//
+// 1. An m that has a trace buffer MUST be on either the allm or sched.freem lists.
+//
+// Otherwise, traceAdvance might miss an M with a buffer that needs to be flushed.
+//
+// 2. Trace buffers MUST only be mutated in traceAdvance or under a traceAcquire/traceRelease.
+//
+// Otherwise, traceAdvance may race with Ms writing trace data when trying to flush buffers.
+//
+// 3. traceAdvance MUST NOT return until all of the current generation's buffers are flushed.
+//
+// Otherwise, callers cannot rely on all the data they need being available (for example, for
+// the flight recorder).
+//
+// 4. P and goroutine state transition events MUST be emitted by an M that owns its ability
+// to transition.
+//
+// What this means is that the M must either be the owner of the P, the owner of the goroutine,
+// or owner of a non-running goroutine's _Gscan bit. There are a lot of bad things that can
+// happen if this invariant isn't maintained, mostly around generating inconsistencies in the
+// trace due to racy emission of events.
+//
+// 5. Acquisition of a P (pidleget or takeP/gcstopP) MUST NOT be performed under a traceAcquire/traceRelease pair.
+//
+// Notably, it's important that traceAcquire/traceRelease not cover a state in which the
+// goroutine or P is not yet owned. For example, if traceAcquire is held across both wirep and
+// pidleget, then we could end up emitting an event in the wrong generation. Suppose T1
+// traceAcquires in generation 1, a generation transition happens, T2 emits a ProcStop and
+// executes pidleput in generation 2, and finally T1 calls pidleget and emits ProcStart.
+// The ProcStart must follow the ProcStop in the trace to make any sense, but ProcStop was
+// emitted in a later generation.
+//
+// 6. Goroutine state transitions, with the exception of transitions into _Grunning, MUST be
+// performed under the traceAcquire/traceRelease pair where the event is emitted.
+//
+// Otherwise, traceAdvance may observe a goroutine state that is inconsistent with the
+// events being emitted. traceAdvance inspects all goroutines' states in order to emit
+// a status event for any goroutine that did not have an event emitted for it already.
+// If the generation then advances in between that observation and the event being emitted,
+// then the trace will contain a status that doesn't line up with the event. For example,
+// if the event is emitted after the state transition _Gwaiting -> _Grunnable, then
+// traceAdvance may observe the goroutine in _Grunnable, emit a status event, advance the
+// generation, and the following generation contains a GoUnblock event.
The trace parser
+// will get confused because it sees that goroutine in _Grunnable in the previous generation
+// trying to be transitioned from _Gwaiting into _Grunnable in the following one. Something
+// similar happens if the trace event is emitted before the state transition, so that does
+// not help either.
+//
+// Transitions to _Grunning do not have the same problem because traceAdvance is unable to
+// observe running goroutines directly. It must stop them, or wait for them to emit an event.
+// Note that it cannot even stop them with asynchronous preemption in any "bad" window between
+// the state transition to _Grunning and the event emission because async preemption cannot
+// stop goroutines in the runtime.
+//
+// 7. Goroutine state transitions into _Grunning MUST emit an event for the transition after
+// the state transition.
+//
+// This follows from invariants (4), (5), and the explanation of (6).
+// The relevant part of the previous invariant is that in order for the tracer to be unable to
+// stop a goroutine, it must be in _Grunning and in the runtime. So to close any windows between
+// event emission and the state transition, the event emission must happen *after* the transition
+// to _Grunning.
+//
+// ## Select corner cases
+//
+// ### CGO calls / system calls
+//
+// CGO calls and system calls are mostly straightforward, except for P stealing. For historical
+// reasons, there is a trace-level P state called ProcSyscall, which used to model
+// _Psyscall (now _Psyscall_unused). This state is used to indicate in the trace that a P
+// is eligible for stealing as part of the parser's ordering logic.
+//
+// Another quirk of this corner case is the ProcSyscallAbandoned trace-level P state, which
+// is used only in status events to indicate a relaxation of verification requirements. It
+// tells the execution trace parser that if it can't find the corresponding thread that the P
+// was stolen from in the state it expects, it should accept the trace anyway. This is also
+// historical. When _Psyscall still existed, one would steal the P and then emit the ProcSteal
+// event, and there was no ordering between the ProcSteal and the subsequent GoSyscallEndBlocked.
+// One clearly happened before the other, but since P stealing was a single atomic, there was
+// no way to enforce the order. The GoSyscallEndBlocked thread could move on and end up in any
+// state, and the GoSyscallEndBlocked could be in a completely different generation from the
+// ProcSteal. Today this is no longer possible as the ProcSteal is always ordered before
+// the GoSyscallEndBlocked event in the runtime.
+//
+// Both ProcSyscall and ProcSyscallAbandoned are likely no longer necessary.
+//
+// ### CGO callbacks
+//
+// When a C thread calls into Go, the execution tracer models that as the creation of a new
+// goroutine. When the thread exits back into C, that is modeled as the destruction of that
+// goroutine. These are the GoCreateSyscall and GoDestroySyscall events, which represent the
+// creation and destruction of a goroutine with its starting and ending states being _Gsyscall.
+//
+// This model is simple to reason about but contradicts the runtime implementation, which
+// doesn't do this directly for performance reasons. The runtime implementation instead caches
+// a G on the M created for the C thread. On Linux this M is then cached in the thread's TLS,
+// and on other systems, the M is put on a global list on exit from Go.
We need to do some
+// extra work to make sure that this is modeled correctly in the tracer. For example,
+// a C thread exiting Go may leave a P hanging off of its M (whether that M is kept in TLS
+// or placed back on a list). In order to correctly model goroutine creation and destruction,
+// we must behave as if the P was at some point stolen by the runtime, if the C thread
+// reenters Go with the same M (and thus, same P) once more.
 
 package runtime
 
@@ -192,38 +354,14 @@ func StartTrace() error {
 
 	// Stop the world.
 	//
-	// The purpose of stopping the world is to make sure that no goroutine is in a
-	// context where it could emit an event by bringing all goroutines to a safe point
-	// with no opportunity to transition.
+	// What we need to successfully begin tracing is to make sure that the next time
+	// *any goroutine* hits a traceAcquire, it sees that the trace is enabled.
 	//
-	// The exception to this rule are goroutines that are concurrently exiting a syscall.
-	// Those will all be forced into the syscalling slow path, and we'll just make sure
-	// that we don't observe any goroutines in that critical section before starting
-	// the world again.
-	//
-	// A good follow-up question to this is why stopping the world is necessary at all
-	// given that we have traceAcquire and traceRelease. Unfortunately, those only help
-	// us when tracing is already active (for performance, so when tracing is off the
-	// tracing seqlock is left untouched). The main issue here is subtle: we're going to
-	// want to obtain a correct starting status for each goroutine, but there are windows
-	// of time in which we could read and emit an incorrect status. Specifically:
-	//
-	//	trace := traceAcquire()
-	//	// <----> problem window
-	//	casgstatus(gp, _Gwaiting, _Grunnable)
-	//	if trace.ok() {
-	//		trace.GoUnpark(gp, 2)
-	//		traceRelease(trace)
-	//	}
-	//
-	// More precisely, if we readgstatus for a gp while another goroutine is in the problem
-	// window and that goroutine didn't observe that tracing had begun, then we might write
-	// a GoStatus(GoWaiting) event for that goroutine, but it won't trace an event marking
-	// the transition from GoWaiting to GoRunnable. The trace will then be broken, because
-	// future events will be emitted assuming the tracer sees GoRunnable.
-	//
-	// In short, what we really need here is to make sure that the next time *any goroutine*
-	// hits a traceAcquire, it sees that the trace is enabled.
+	// Stopping the world gets us most of the way there, since it makes sure that goroutines
+	// stop executing. There is, however, one exception: goroutines without Ps concurrently
+	// exiting a syscall. We handle this by making sure that, after we update trace.gen,
+	// there isn't a single goroutine calling traceAcquire on the syscall slow path by checking
+	// trace.exitingSyscall. See the comment on the check below for more details.
 	//
 	// Note also that stopping the world is necessary to make sure sweep-related events are
 	// coherent.
Since the world is stopped and sweeps are non-preemptible, we can never start diff --git a/src/runtime/trace/flightrecorder_test.go b/src/runtime/trace/flightrecorder_test.go index 61cb03dcf60..221f9c6b673 100644 --- a/src/runtime/trace/flightrecorder_test.go +++ b/src/runtime/trace/flightrecorder_test.go @@ -119,7 +119,7 @@ func TestFlightRecorderConcurrentWriteTo(t *testing.T) { if buf.Len() == 0 { continue } - testReader(t, buf, testtrace.ExpectSuccess()) + testReader(t, buf.Bytes(), testtrace.ExpectSuccess()) } } @@ -260,12 +260,12 @@ func testFlightRecorder(t *testing.T, fr *trace.FlightRecorder, f flightRecorder traceBytes := buf.Bytes() // Parse the trace to make sure it's not broken. - testReader(t, bytes.NewReader(traceBytes), testtrace.ExpectSuccess()) + testReader(t, traceBytes, testtrace.ExpectSuccess()) return traceBytes } -func testReader(t *testing.T, tr io.Reader, exp *testtrace.Expectation) { - r, err := inttrace.NewReader(tr) +func testReader(t *testing.T, tb []byte, exp *testtrace.Expectation) { + r, err := inttrace.NewReader(bytes.NewReader(tb)) if err != nil { if err := exp.Check(err); err != nil { t.Error(err) @@ -292,6 +292,9 @@ func testReader(t *testing.T, tr io.Reader, exp *testtrace.Expectation) { if err := exp.Check(nil); err != nil { t.Error(err) } + if t.Failed() || *dumpTraces { + testtrace.Dump(t, "trace", tb, *dumpTraces) + } } func TestTraceAndFlightRecorder(t *testing.T) { diff --git a/src/runtime/trace/trace_test.go b/src/runtime/trace/trace_test.go index b891c8c8f9f..2174be061de 100644 --- a/src/runtime/trace/trace_test.go +++ b/src/runtime/trace/trace_test.go @@ -7,13 +7,16 @@ package trace_test import ( "bytes" "flag" - "os" . "runtime/trace" "testing" "time" ) -var saveTraces = flag.Bool("savetraces", false, "save traces collected by tests") +var dumpTraces = flag.Bool("dump-traces", false, "dump traces to a file, even on success") + +// This file just contains smoke tests and tests of runtime/trace logic only. +// It doesn't validate the resulting traces. See the internal/trace package for +// more comprehensive end-to-end tests. func TestTraceStartStop(t *testing.T) { if IsEnabled() { @@ -32,7 +35,6 @@ func TestTraceStartStop(t *testing.T) { if size != buf.Len() { t.Fatalf("trace writes after stop: %v -> %v", size, buf.Len()) } - saveTrace(t, buf, "TestTraceStartStop") } func TestTraceDoubleStart(t *testing.T) { @@ -50,12 +52,3 @@ func TestTraceDoubleStart(t *testing.T) { Stop() Stop() } - -func saveTrace(t *testing.T, buf *bytes.Buffer, name string) { - if !*saveTraces { - return - } - if err := os.WriteFile(name+".trace", buf.Bytes(), 0600); err != nil { - t.Errorf("failed to write trace file: %s", err) - } -} diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go index 8882c306edb..6649f724716 100644 --- a/src/runtime/traceback.go +++ b/src/runtime/traceback.go @@ -1208,6 +1208,7 @@ var gStatusStrings = [...]string{ _Gcopystack: "copystack", _Gleaked: "leaked", _Gpreempted: "preempted", + _Gdeadextra: "waiting for cgo callback", } func goroutineheader(gp *g) { @@ -1295,7 +1296,16 @@ func tracebacksomeothers(me *g, showf func(*g) bool) { // against concurrent creation of new Gs, but even with allglock we may // miss Gs created after this loop. 
forEachGRace(func(gp *g) { - if gp == me || gp == curgp || readgstatus(gp) == _Gdead || !showf(gp) || (isSystemGoroutine(gp, false) && level < 2) { + if gp == me || gp == curgp { + return + } + if status := readgstatus(gp); status == _Gdead || status == _Gdeadextra { + return + } + if !showf(gp) { + return + } + if isSystemGoroutine(gp, false) && level < 2 { return } print("\n") @@ -1304,7 +1314,16 @@ func tracebacksomeothers(me *g, showf func(*g) bool) { // from a signal handler initiated during a systemstack call. // The original G is still in the running state, and we want to // print its stack. - if gp.m != getg().m && readgstatus(gp)&^_Gscan == _Grunning { + // + // There's a small window of time in exitsyscall where a goroutine could be + // in _Grunning as it's exiting a syscall. This could be the case even if the + // world is stopped or frozen. + // + // This is OK because the goroutine will not exit the syscall while the world + // is stopped or frozen. This is also why it's safe to check syscallsp here, + // and safe to take the goroutine's stack trace. The syscall path mutates + // syscallsp only just before exiting the syscall. + if gp.m != getg().m && readgstatus(gp)&^_Gscan == _Grunning && gp.syscallsp == 0 { print("\tgoroutine running on other thread; stack unavailable\n") printcreatedby(gp) } else { diff --git a/src/runtime/traceevent.go b/src/runtime/traceevent.go index b0bc4c017da..86512754761 100644 --- a/src/runtime/traceevent.go +++ b/src/runtime/traceevent.go @@ -44,6 +44,13 @@ func (tl traceLocker) eventWriter(goStatus tracev2.GoStatus, procStatus tracev2. if gp := tl.mp.curg; gp != nil && !gp.trace.statusWasTraced(tl.gen) && gp.trace.acquireStatus(tl.gen) { tl.writer().writeGoStatus(gp.goid, int64(tl.mp.procid), goStatus, gp.inMarkAssist, 0 /* no stack */).end() } + return tl.rawEventWriter() +} + +// rawEventWriter creates a new traceEventWriter without emitting any status events. +// +// It is the caller's responsibility to emit any status events, if necessary. +func (tl traceLocker) rawEventWriter() traceEventWriter { return traceEventWriter{tl} } diff --git a/src/runtime/traceruntime.go b/src/runtime/traceruntime.go index 06e36fd8026..ad91d9c836c 100644 --- a/src/runtime/traceruntime.go +++ b/src/runtime/traceruntime.go @@ -29,6 +29,7 @@ type mTraceState struct { buf [2][tracev2.NumExperiments]*traceBuf // Per-M traceBuf for writing. Indexed by trace.gen%2. link *m // Snapshot of alllink or freelink. reentered uint32 // Whether we've reentered tracing from within tracing. + entryGen uintptr // The generation value on first entry. oldthrowsplit bool // gp.throwsplit upon calling traceLocker.writer. For debugging. } @@ -212,7 +213,7 @@ func traceAcquireEnabled() traceLocker { // that it is. if mp.trace.seqlock.Load()%2 == 1 { mp.trace.reentered++ - return traceLocker{mp, trace.gen.Load()} + return traceLocker{mp, mp.trace.entryGen} } // Acquire the trace seqlock. This prevents traceAdvance from moving forward @@ -240,6 +241,7 @@ func traceAcquireEnabled() traceLocker { releasem(mp) return traceLocker{} } + mp.trace.entryGen = gen return traceLocker{mp, gen} } @@ -532,19 +534,17 @@ func (tl traceLocker) GoSysExit(lostP bool) { // ProcSteal indicates that our current M stole a P from another M. // -// inSyscall indicates that we're stealing the P from a syscall context. -// // The caller must have ownership of pp. -func (tl traceLocker) ProcSteal(pp *p, inSyscall bool) { +func (tl traceLocker) ProcSteal(pp *p) { // Grab the M ID we stole from. 
mStolenFrom := pp.trace.mSyscallID pp.trace.mSyscallID = -1 // Emit the status of the P we're stealing. We may be just about to do this when creating the event - // writer but it's not guaranteed, even if inSyscall is true. Although it might seem like from a - // syscall context we're always stealing a P for ourselves, we may have not wired it up yet (so + // writer but it's not guaranteed, even if we're stealing from a syscall. Although it might seem like + // from a syscall context we're always stealing a P for ourselves, we may have not wired it up yet (so // it wouldn't be visible to eventWriter) or we may not even intend to wire it up to ourselves - // at all (e.g. entersyscall_gcwait). + // at all and plan to hand it back to the runtime. if !pp.trace.statusWasTraced(tl.gen) && pp.trace.acquireStatus(tl.gen) { // Careful: don't use the event writer. We never want status or in-progress events // to trigger more in-progress events. @@ -559,7 +559,7 @@ func (tl traceLocker) ProcSteal(pp *p, inSyscall bool) { // In the latter, we're a goroutine in a syscall. goStatus := tracev2.GoRunning procStatus := tracev2.ProcRunning - if inSyscall { + if tl.mp.curg != nil && tl.mp.curg.syscallsp != 0 { goStatus = tracev2.GoSyscall procStatus = tracev2.ProcSyscallAbandoned } @@ -593,19 +593,27 @@ func (tl traceLocker) GoCreateSyscall(gp *g) { // N.B. We should never trace a status for this goroutine (which we're currently running on), // since we want this to appear like goroutine creation. gp.trace.setStatusTraced(tl.gen) - tl.eventWriter(tracev2.GoBad, tracev2.ProcBad).event(tracev2.EvGoCreateSyscall, traceArg(gp.goid)) + + // We might have a P left over on the thread from the last cgo callback, + // but in a syscall context, it is NOT ours. Act as if we do not have a P, + // and don't record a status. + tl.rawEventWriter().event(tracev2.EvGoCreateSyscall, traceArg(gp.goid)) } // GoDestroySyscall indicates that a goroutine has transitioned from GoSyscall to dead. // -// Must not have a P. -// // This occurs when Go code returns back to C. On pthread platforms it occurs only when // the C thread is destroyed. func (tl traceLocker) GoDestroySyscall() { - // N.B. If we trace a status here, we must never have a P, and we must be on a goroutine - // that is in the syscall state. - tl.eventWriter(tracev2.GoSyscall, tracev2.ProcBad).event(tracev2.EvGoDestroySyscall) + // Write the status for the goroutine if necessary. + if gp := tl.mp.curg; gp != nil && !gp.trace.statusWasTraced(tl.gen) && gp.trace.acquireStatus(tl.gen) { + tl.writer().writeGoStatus(gp.goid, int64(tl.mp.procid), tracev2.GoSyscall, false, 0 /* no stack */).end() + } + + // We might have a P left over on the thread from the last cgo callback, + // but in a syscall context, it is NOT ours. Act as if we do not have a P, + // and don't record a status. + tl.rawEventWriter().event(tracev2.EvGoDestroySyscall) } // To access runtime functions from runtime/trace. diff --git a/src/runtime/tracestatus.go b/src/runtime/tracestatus.go index 8b5eafd170f..6343126faf0 100644 --- a/src/runtime/tracestatus.go +++ b/src/runtime/tracestatus.go @@ -62,15 +62,14 @@ func (w traceWriter) writeProcStatusForP(pp *p, inSTW bool) traceWriter { } case _Prunning: status = tracev2.ProcRunning - // There's a short window wherein the goroutine may have entered _Gsyscall - // but it still owns the P (it's not in _Psyscall yet). 
The goroutine entering
-		// _Gsyscall is the tracer's signal that the P its bound to is also in a syscall,
-		// so we need to emit a status that matches. See #64318.
+		// A P is considered to be in a syscall if its attached G is. Since we fully
+		// own the P, the goroutine isn't going to transition and we can trivially
+		// check if the goroutine is in a syscall. This used to be just a small problematic
+		// window, but this is now the default since _Psyscall no longer exists. See #64318
+		// for the history on why it was needed while _Psyscall still existed.
 		if w.mp.p.ptr() == pp && w.mp.curg != nil && readgstatus(w.mp.curg)&^_Gscan == _Gsyscall {
 			status = tracev2.ProcSyscall
 		}
-	case _Psyscall:
-		status = tracev2.ProcSyscall
 	default:
 		throw("attempt to trace invalid or unsupported P status")
 	}
@@ -134,7 +133,7 @@ func goStatusToTraceGoStatus(status uint32, wr waitReason) tracev2.GoStatus {
 		if status == _Gwaiting && wr.isWaitingForSuspendG() {
 			tgs = tracev2.GoRunning
 		}
-	case _Gdead:
+	case _Gdead, _Gdeadextra:
 		throw("tried to trace dead goroutine")
 	default:
 		throw("tried to trace goroutine with invalid or unsupported status")
diff --git a/src/runtime/vdso_freebsd.go b/src/runtime/vdso_freebsd.go
index feecada0357..99ca1571a7a 100644
--- a/src/runtime/vdso_freebsd.go
+++ b/src/runtime/vdso_freebsd.go
@@ -54,6 +54,9 @@ func binuptime(abs bool) (bt bintime) {
 	}
 	curr := atomic.Load(&timekeepSharedPage.current) // atomic_load_acq_32
+	if curr >= uint32(len(timehands)) {
+		return zeroBintime
+	}
 	th := &timehands[curr]
 	gen := atomic.Load(&th.gen) // atomic_load_acq_32
 	bt = th.offset
diff --git a/src/simd/string.go b/src/simd/string.go
index 35584da0218..a692653aa0d 100644
--- a/src/simd/string.go
+++ b/src/simd/string.go
@@ -7,8 +7,7 @@
 package simd
 
 import (
-	"internal/ftoa"
-	"internal/itoa"
+	"internal/strconv"
 )
 
 type number interface {
@@ -23,25 +22,25 @@ func sliceToString[T number](x []T) string {
 		pfx = ","
 		switch e := any(y).(type) {
 		case int8:
-			s += itoa.Itoa(int(e))
+			s += strconv.Itoa(int(e))
 		case int16:
-			s += itoa.Itoa(int(e))
+			s += strconv.Itoa(int(e))
 		case int32:
-			s += itoa.Itoa(int(e))
+			s += strconv.Itoa(int(e))
 		case int64:
-			s += itoa.Itoa(int(e))
+			s += strconv.Itoa(int(e))
 		case uint8:
-			s += itoa.Uitoa(uint(e))
+			s += strconv.FormatUint(uint64(e), 10)
 		case uint16:
-			s += itoa.Uitoa(uint(e))
+			s += strconv.FormatUint(uint64(e), 10)
 		case uint32:
-			s += itoa.Uitoa(uint(e))
+			s += strconv.FormatUint(uint64(e), 10)
 		case uint64:
-			s += itoa.Uitoa(uint(e))
+			s += strconv.FormatUint(uint64(e), 10)
 		case float32:
-			s += ftoa.FormatFloat(float64(e), 'g', -1, 32)
+			s += strconv.FormatFloat(float64(e), 'g', -1, 32)
 		case float64:
-			s += ftoa.FormatFloat(e, 'g', -1, 64)
+			s += strconv.FormatFloat(e, 'g', -1, 64)
 		}
 	}
 	s += "}"
diff --git a/src/strconv/eisel_lemire.go b/src/strconv/eisel_lemire.go
deleted file mode 100644
index 03842e50797..00000000000
--- a/src/strconv/eisel_lemire.go
+++ /dev/null
@@ -1,884 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
- -package strconv - -// This file implements the Eisel-Lemire ParseFloat algorithm, published in -// 2020 and discussed extensively at -// https://nigeltao.github.io/blog/2020/eisel-lemire.html -// -// The original C++ implementation is at -// https://github.com/lemire/fast_double_parser/blob/644bef4306059d3be01a04e77d3cc84b379c596f/include/fast_double_parser.h#L840 -// -// This Go re-implementation closely follows the C re-implementation at -// https://github.com/google/wuffs/blob/ba3818cb6b473a2ed0b38ecfc07dbbd3a97e8ae7/internal/cgen/base/floatconv-submodule-code.c#L990 -// -// Additional testing (on over several million test strings) is done by -// https://github.com/nigeltao/parse-number-fxx-test-data/blob/5280dcfccf6d0b02a65ae282dad0b6d9de50e039/script/test-go-strconv.go - -import ( - "math" - "math/bits" -) - -func eiselLemire64(man uint64, exp10 int, neg bool) (f float64, ok bool) { - // The terse comments in this function body refer to sections of the - // https://nigeltao.github.io/blog/2020/eisel-lemire.html blog post. - - // Exp10 Range. - if man == 0 { - if neg { - f = math.Float64frombits(0x8000000000000000) // Negative zero. - } - return f, true - } - if exp10 < detailedPowersOfTenMinExp10 || detailedPowersOfTenMaxExp10 < exp10 { - return 0, false - } - - // Normalization. - clz := bits.LeadingZeros64(man) - man <<= uint(clz) - const float64ExponentBias = 1023 - retExp2 := uint64(217706*exp10>>16+64+float64ExponentBias) - uint64(clz) - - // Multiplication. - xHi, xLo := bits.Mul64(man, detailedPowersOfTen[exp10-detailedPowersOfTenMinExp10][1]) - - // Wider Approximation. - if xHi&0x1FF == 0x1FF && xLo+man < man { - yHi, yLo := bits.Mul64(man, detailedPowersOfTen[exp10-detailedPowersOfTenMinExp10][0]) - mergedHi, mergedLo := xHi, xLo+yHi - if mergedLo < xLo { - mergedHi++ - } - if mergedHi&0x1FF == 0x1FF && mergedLo+1 == 0 && yLo+man < man { - return 0, false - } - xHi, xLo = mergedHi, mergedLo - } - - // Shifting to 54 Bits. - msb := xHi >> 63 - retMantissa := xHi >> (msb + 9) - retExp2 -= 1 ^ msb - - // Half-way Ambiguity. - if xLo == 0 && xHi&0x1FF == 0 && retMantissa&3 == 1 { - return 0, false - } - - // From 54 to 53 Bits. - retMantissa += retMantissa & 1 - retMantissa >>= 1 - if retMantissa>>53 > 0 { - retMantissa >>= 1 - retExp2 += 1 - } - // retExp2 is a uint64. Zero or underflow means that we're in subnormal - // float64 space. 0x7FF or above means that we're in Inf/NaN float64 space. - // - // The if block is equivalent to (but has fewer branches than): - // if retExp2 <= 0 || retExp2 >= 0x7FF { etc } - if retExp2-1 >= 0x7FF-1 { - return 0, false - } - retBits := retExp2<<52 | retMantissa&0x000FFFFFFFFFFFFF - if neg { - retBits |= 0x8000000000000000 - } - return math.Float64frombits(retBits), true -} - -func eiselLemire32(man uint64, exp10 int, neg bool) (f float32, ok bool) { - // The terse comments in this function body refer to sections of the - // https://nigeltao.github.io/blog/2020/eisel-lemire.html blog post. - // - // That blog post discusses the float64 flavor (11 exponent bits with a - // -1023 bias, 52 mantissa bits) of the algorithm, but the same approach - // applies to the float32 flavor (8 exponent bits with a -127 bias, 23 - // mantissa bits). The computation here happens with 64-bit values (e.g. - // man, xHi, retMantissa) before finally converting to a 32-bit float. - - // Exp10 Range. - if man == 0 { - if neg { - f = math.Float32frombits(0x80000000) // Negative zero. 
- } - return f, true - } - if exp10 < detailedPowersOfTenMinExp10 || detailedPowersOfTenMaxExp10 < exp10 { - return 0, false - } - - // Normalization. - clz := bits.LeadingZeros64(man) - man <<= uint(clz) - const float32ExponentBias = 127 - retExp2 := uint64(217706*exp10>>16+64+float32ExponentBias) - uint64(clz) - - // Multiplication. - xHi, xLo := bits.Mul64(man, detailedPowersOfTen[exp10-detailedPowersOfTenMinExp10][1]) - - // Wider Approximation. - if xHi&0x3FFFFFFFFF == 0x3FFFFFFFFF && xLo+man < man { - yHi, yLo := bits.Mul64(man, detailedPowersOfTen[exp10-detailedPowersOfTenMinExp10][0]) - mergedHi, mergedLo := xHi, xLo+yHi - if mergedLo < xLo { - mergedHi++ - } - if mergedHi&0x3FFFFFFFFF == 0x3FFFFFFFFF && mergedLo+1 == 0 && yLo+man < man { - return 0, false - } - xHi, xLo = mergedHi, mergedLo - } - - // Shifting to 54 Bits (and for float32, it's shifting to 25 bits). - msb := xHi >> 63 - retMantissa := xHi >> (msb + 38) - retExp2 -= 1 ^ msb - - // Half-way Ambiguity. - if xLo == 0 && xHi&0x3FFFFFFFFF == 0 && retMantissa&3 == 1 { - return 0, false - } - - // From 54 to 53 Bits (and for float32, it's from 25 to 24 bits). - retMantissa += retMantissa & 1 - retMantissa >>= 1 - if retMantissa>>24 > 0 { - retMantissa >>= 1 - retExp2 += 1 - } - // retExp2 is a uint64. Zero or underflow means that we're in subnormal - // float32 space. 0xFF or above means that we're in Inf/NaN float32 space. - // - // The if block is equivalent to (but has fewer branches than): - // if retExp2 <= 0 || retExp2 >= 0xFF { etc } - if retExp2-1 >= 0xFF-1 { - return 0, false - } - retBits := retExp2<<23 | retMantissa&0x007FFFFF - if neg { - retBits |= 0x80000000 - } - return math.Float32frombits(uint32(retBits)), true -} - -// detailedPowersOfTen{Min,Max}Exp10 is the power of 10 represented by the -// first and last rows of detailedPowersOfTen. Both bounds are inclusive. -const ( - detailedPowersOfTenMinExp10 = -348 - detailedPowersOfTenMaxExp10 = +347 -) - -// detailedPowersOfTen contains 128-bit mantissa approximations (rounded down) -// to the powers of 10. For example: -// -// - 1e43 ≈ (0xE596B7B0_C643C719 * (2 ** 79)) -// - 1e43 = (0xE596B7B0_C643C719_6D9CCD05_D0000000 * (2 ** 15)) -// -// The mantissas are explicitly listed. The exponents are implied by a linear -// expression with slope 217706.0/65536.0 ≈ log(10)/log(2). 
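That slope rewards a quick sanity check: 217706/65536 is a 16-fractional-bit fixed-point approximation of log2(10), which is how the deleted functions' 217706*exp10>>16 recovers a binary exponent from a decimal one. A small verification sketch (not part of the CL):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	fmt.Printf("%.7f\n", 217706.0/65536.0) // 3.3219299, the fixed-point slope
	fmt.Printf("%.7f\n", math.Log2(10))    // 3.3219281, the value it approximates
	fmt.Println(217706 * 100 >> 16)        // 332: the binary exponent of 1e100
}
```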
-// -// The table was generated by -// https://github.com/google/wuffs/blob/ba3818cb6b473a2ed0b38ecfc07dbbd3a97e8ae7/script/print-mpb-powers-of-10.go -var detailedPowersOfTen = [...][2]uint64{ - {0x1732C869CD60E453, 0xFA8FD5A0081C0288}, // 1e-348 - {0x0E7FBD42205C8EB4, 0x9C99E58405118195}, // 1e-347 - {0x521FAC92A873B261, 0xC3C05EE50655E1FA}, // 1e-346 - {0xE6A797B752909EF9, 0xF4B0769E47EB5A78}, // 1e-345 - {0x9028BED2939A635C, 0x98EE4A22ECF3188B}, // 1e-344 - {0x7432EE873880FC33, 0xBF29DCABA82FDEAE}, // 1e-343 - {0x113FAA2906A13B3F, 0xEEF453D6923BD65A}, // 1e-342 - {0x4AC7CA59A424C507, 0x9558B4661B6565F8}, // 1e-341 - {0x5D79BCF00D2DF649, 0xBAAEE17FA23EBF76}, // 1e-340 - {0xF4D82C2C107973DC, 0xE95A99DF8ACE6F53}, // 1e-339 - {0x79071B9B8A4BE869, 0x91D8A02BB6C10594}, // 1e-338 - {0x9748E2826CDEE284, 0xB64EC836A47146F9}, // 1e-337 - {0xFD1B1B2308169B25, 0xE3E27A444D8D98B7}, // 1e-336 - {0xFE30F0F5E50E20F7, 0x8E6D8C6AB0787F72}, // 1e-335 - {0xBDBD2D335E51A935, 0xB208EF855C969F4F}, // 1e-334 - {0xAD2C788035E61382, 0xDE8B2B66B3BC4723}, // 1e-333 - {0x4C3BCB5021AFCC31, 0x8B16FB203055AC76}, // 1e-332 - {0xDF4ABE242A1BBF3D, 0xADDCB9E83C6B1793}, // 1e-331 - {0xD71D6DAD34A2AF0D, 0xD953E8624B85DD78}, // 1e-330 - {0x8672648C40E5AD68, 0x87D4713D6F33AA6B}, // 1e-329 - {0x680EFDAF511F18C2, 0xA9C98D8CCB009506}, // 1e-328 - {0x0212BD1B2566DEF2, 0xD43BF0EFFDC0BA48}, // 1e-327 - {0x014BB630F7604B57, 0x84A57695FE98746D}, // 1e-326 - {0x419EA3BD35385E2D, 0xA5CED43B7E3E9188}, // 1e-325 - {0x52064CAC828675B9, 0xCF42894A5DCE35EA}, // 1e-324 - {0x7343EFEBD1940993, 0x818995CE7AA0E1B2}, // 1e-323 - {0x1014EBE6C5F90BF8, 0xA1EBFB4219491A1F}, // 1e-322 - {0xD41A26E077774EF6, 0xCA66FA129F9B60A6}, // 1e-321 - {0x8920B098955522B4, 0xFD00B897478238D0}, // 1e-320 - {0x55B46E5F5D5535B0, 0x9E20735E8CB16382}, // 1e-319 - {0xEB2189F734AA831D, 0xC5A890362FDDBC62}, // 1e-318 - {0xA5E9EC7501D523E4, 0xF712B443BBD52B7B}, // 1e-317 - {0x47B233C92125366E, 0x9A6BB0AA55653B2D}, // 1e-316 - {0x999EC0BB696E840A, 0xC1069CD4EABE89F8}, // 1e-315 - {0xC00670EA43CA250D, 0xF148440A256E2C76}, // 1e-314 - {0x380406926A5E5728, 0x96CD2A865764DBCA}, // 1e-313 - {0xC605083704F5ECF2, 0xBC807527ED3E12BC}, // 1e-312 - {0xF7864A44C633682E, 0xEBA09271E88D976B}, // 1e-311 - {0x7AB3EE6AFBE0211D, 0x93445B8731587EA3}, // 1e-310 - {0x5960EA05BAD82964, 0xB8157268FDAE9E4C}, // 1e-309 - {0x6FB92487298E33BD, 0xE61ACF033D1A45DF}, // 1e-308 - {0xA5D3B6D479F8E056, 0x8FD0C16206306BAB}, // 1e-307 - {0x8F48A4899877186C, 0xB3C4F1BA87BC8696}, // 1e-306 - {0x331ACDABFE94DE87, 0xE0B62E2929ABA83C}, // 1e-305 - {0x9FF0C08B7F1D0B14, 0x8C71DCD9BA0B4925}, // 1e-304 - {0x07ECF0AE5EE44DD9, 0xAF8E5410288E1B6F}, // 1e-303 - {0xC9E82CD9F69D6150, 0xDB71E91432B1A24A}, // 1e-302 - {0xBE311C083A225CD2, 0x892731AC9FAF056E}, // 1e-301 - {0x6DBD630A48AAF406, 0xAB70FE17C79AC6CA}, // 1e-300 - {0x092CBBCCDAD5B108, 0xD64D3D9DB981787D}, // 1e-299 - {0x25BBF56008C58EA5, 0x85F0468293F0EB4E}, // 1e-298 - {0xAF2AF2B80AF6F24E, 0xA76C582338ED2621}, // 1e-297 - {0x1AF5AF660DB4AEE1, 0xD1476E2C07286FAA}, // 1e-296 - {0x50D98D9FC890ED4D, 0x82CCA4DB847945CA}, // 1e-295 - {0xE50FF107BAB528A0, 0xA37FCE126597973C}, // 1e-294 - {0x1E53ED49A96272C8, 0xCC5FC196FEFD7D0C}, // 1e-293 - {0x25E8E89C13BB0F7A, 0xFF77B1FCBEBCDC4F}, // 1e-292 - {0x77B191618C54E9AC, 0x9FAACF3DF73609B1}, // 1e-291 - {0xD59DF5B9EF6A2417, 0xC795830D75038C1D}, // 1e-290 - {0x4B0573286B44AD1D, 0xF97AE3D0D2446F25}, // 1e-289 - {0x4EE367F9430AEC32, 0x9BECCE62836AC577}, // 1e-288 - {0x229C41F793CDA73F, 0xC2E801FB244576D5}, // 1e-287 - 
{0x6B43527578C1110F, 0xF3A20279ED56D48A}, // 1e-286 - {0x830A13896B78AAA9, 0x9845418C345644D6}, // 1e-285 - {0x23CC986BC656D553, 0xBE5691EF416BD60C}, // 1e-284 - {0x2CBFBE86B7EC8AA8, 0xEDEC366B11C6CB8F}, // 1e-283 - {0x7BF7D71432F3D6A9, 0x94B3A202EB1C3F39}, // 1e-282 - {0xDAF5CCD93FB0CC53, 0xB9E08A83A5E34F07}, // 1e-281 - {0xD1B3400F8F9CFF68, 0xE858AD248F5C22C9}, // 1e-280 - {0x23100809B9C21FA1, 0x91376C36D99995BE}, // 1e-279 - {0xABD40A0C2832A78A, 0xB58547448FFFFB2D}, // 1e-278 - {0x16C90C8F323F516C, 0xE2E69915B3FFF9F9}, // 1e-277 - {0xAE3DA7D97F6792E3, 0x8DD01FAD907FFC3B}, // 1e-276 - {0x99CD11CFDF41779C, 0xB1442798F49FFB4A}, // 1e-275 - {0x40405643D711D583, 0xDD95317F31C7FA1D}, // 1e-274 - {0x482835EA666B2572, 0x8A7D3EEF7F1CFC52}, // 1e-273 - {0xDA3243650005EECF, 0xAD1C8EAB5EE43B66}, // 1e-272 - {0x90BED43E40076A82, 0xD863B256369D4A40}, // 1e-271 - {0x5A7744A6E804A291, 0x873E4F75E2224E68}, // 1e-270 - {0x711515D0A205CB36, 0xA90DE3535AAAE202}, // 1e-269 - {0x0D5A5B44CA873E03, 0xD3515C2831559A83}, // 1e-268 - {0xE858790AFE9486C2, 0x8412D9991ED58091}, // 1e-267 - {0x626E974DBE39A872, 0xA5178FFF668AE0B6}, // 1e-266 - {0xFB0A3D212DC8128F, 0xCE5D73FF402D98E3}, // 1e-265 - {0x7CE66634BC9D0B99, 0x80FA687F881C7F8E}, // 1e-264 - {0x1C1FFFC1EBC44E80, 0xA139029F6A239F72}, // 1e-263 - {0xA327FFB266B56220, 0xC987434744AC874E}, // 1e-262 - {0x4BF1FF9F0062BAA8, 0xFBE9141915D7A922}, // 1e-261 - {0x6F773FC3603DB4A9, 0x9D71AC8FADA6C9B5}, // 1e-260 - {0xCB550FB4384D21D3, 0xC4CE17B399107C22}, // 1e-259 - {0x7E2A53A146606A48, 0xF6019DA07F549B2B}, // 1e-258 - {0x2EDA7444CBFC426D, 0x99C102844F94E0FB}, // 1e-257 - {0xFA911155FEFB5308, 0xC0314325637A1939}, // 1e-256 - {0x793555AB7EBA27CA, 0xF03D93EEBC589F88}, // 1e-255 - {0x4BC1558B2F3458DE, 0x96267C7535B763B5}, // 1e-254 - {0x9EB1AAEDFB016F16, 0xBBB01B9283253CA2}, // 1e-253 - {0x465E15A979C1CADC, 0xEA9C227723EE8BCB}, // 1e-252 - {0x0BFACD89EC191EC9, 0x92A1958A7675175F}, // 1e-251 - {0xCEF980EC671F667B, 0xB749FAED14125D36}, // 1e-250 - {0x82B7E12780E7401A, 0xE51C79A85916F484}, // 1e-249 - {0xD1B2ECB8B0908810, 0x8F31CC0937AE58D2}, // 1e-248 - {0x861FA7E6DCB4AA15, 0xB2FE3F0B8599EF07}, // 1e-247 - {0x67A791E093E1D49A, 0xDFBDCECE67006AC9}, // 1e-246 - {0xE0C8BB2C5C6D24E0, 0x8BD6A141006042BD}, // 1e-245 - {0x58FAE9F773886E18, 0xAECC49914078536D}, // 1e-244 - {0xAF39A475506A899E, 0xDA7F5BF590966848}, // 1e-243 - {0x6D8406C952429603, 0x888F99797A5E012D}, // 1e-242 - {0xC8E5087BA6D33B83, 0xAAB37FD7D8F58178}, // 1e-241 - {0xFB1E4A9A90880A64, 0xD5605FCDCF32E1D6}, // 1e-240 - {0x5CF2EEA09A55067F, 0x855C3BE0A17FCD26}, // 1e-239 - {0xF42FAA48C0EA481E, 0xA6B34AD8C9DFC06F}, // 1e-238 - {0xF13B94DAF124DA26, 0xD0601D8EFC57B08B}, // 1e-237 - {0x76C53D08D6B70858, 0x823C12795DB6CE57}, // 1e-236 - {0x54768C4B0C64CA6E, 0xA2CB1717B52481ED}, // 1e-235 - {0xA9942F5DCF7DFD09, 0xCB7DDCDDA26DA268}, // 1e-234 - {0xD3F93B35435D7C4C, 0xFE5D54150B090B02}, // 1e-233 - {0xC47BC5014A1A6DAF, 0x9EFA548D26E5A6E1}, // 1e-232 - {0x359AB6419CA1091B, 0xC6B8E9B0709F109A}, // 1e-231 - {0xC30163D203C94B62, 0xF867241C8CC6D4C0}, // 1e-230 - {0x79E0DE63425DCF1D, 0x9B407691D7FC44F8}, // 1e-229 - {0x985915FC12F542E4, 0xC21094364DFB5636}, // 1e-228 - {0x3E6F5B7B17B2939D, 0xF294B943E17A2BC4}, // 1e-227 - {0xA705992CEECF9C42, 0x979CF3CA6CEC5B5A}, // 1e-226 - {0x50C6FF782A838353, 0xBD8430BD08277231}, // 1e-225 - {0xA4F8BF5635246428, 0xECE53CEC4A314EBD}, // 1e-224 - {0x871B7795E136BE99, 0x940F4613AE5ED136}, // 1e-223 - {0x28E2557B59846E3F, 0xB913179899F68584}, // 1e-222 - {0x331AEADA2FE589CF, 0xE757DD7EC07426E5}, // 
1e-221 - {0x3FF0D2C85DEF7621, 0x9096EA6F3848984F}, // 1e-220 - {0x0FED077A756B53A9, 0xB4BCA50B065ABE63}, // 1e-219 - {0xD3E8495912C62894, 0xE1EBCE4DC7F16DFB}, // 1e-218 - {0x64712DD7ABBBD95C, 0x8D3360F09CF6E4BD}, // 1e-217 - {0xBD8D794D96AACFB3, 0xB080392CC4349DEC}, // 1e-216 - {0xECF0D7A0FC5583A0, 0xDCA04777F541C567}, // 1e-215 - {0xF41686C49DB57244, 0x89E42CAAF9491B60}, // 1e-214 - {0x311C2875C522CED5, 0xAC5D37D5B79B6239}, // 1e-213 - {0x7D633293366B828B, 0xD77485CB25823AC7}, // 1e-212 - {0xAE5DFF9C02033197, 0x86A8D39EF77164BC}, // 1e-211 - {0xD9F57F830283FDFC, 0xA8530886B54DBDEB}, // 1e-210 - {0xD072DF63C324FD7B, 0xD267CAA862A12D66}, // 1e-209 - {0x4247CB9E59F71E6D, 0x8380DEA93DA4BC60}, // 1e-208 - {0x52D9BE85F074E608, 0xA46116538D0DEB78}, // 1e-207 - {0x67902E276C921F8B, 0xCD795BE870516656}, // 1e-206 - {0x00BA1CD8A3DB53B6, 0x806BD9714632DFF6}, // 1e-205 - {0x80E8A40ECCD228A4, 0xA086CFCD97BF97F3}, // 1e-204 - {0x6122CD128006B2CD, 0xC8A883C0FDAF7DF0}, // 1e-203 - {0x796B805720085F81, 0xFAD2A4B13D1B5D6C}, // 1e-202 - {0xCBE3303674053BB0, 0x9CC3A6EEC6311A63}, // 1e-201 - {0xBEDBFC4411068A9C, 0xC3F490AA77BD60FC}, // 1e-200 - {0xEE92FB5515482D44, 0xF4F1B4D515ACB93B}, // 1e-199 - {0x751BDD152D4D1C4A, 0x991711052D8BF3C5}, // 1e-198 - {0xD262D45A78A0635D, 0xBF5CD54678EEF0B6}, // 1e-197 - {0x86FB897116C87C34, 0xEF340A98172AACE4}, // 1e-196 - {0xD45D35E6AE3D4DA0, 0x9580869F0E7AAC0E}, // 1e-195 - {0x8974836059CCA109, 0xBAE0A846D2195712}, // 1e-194 - {0x2BD1A438703FC94B, 0xE998D258869FACD7}, // 1e-193 - {0x7B6306A34627DDCF, 0x91FF83775423CC06}, // 1e-192 - {0x1A3BC84C17B1D542, 0xB67F6455292CBF08}, // 1e-191 - {0x20CABA5F1D9E4A93, 0xE41F3D6A7377EECA}, // 1e-190 - {0x547EB47B7282EE9C, 0x8E938662882AF53E}, // 1e-189 - {0xE99E619A4F23AA43, 0xB23867FB2A35B28D}, // 1e-188 - {0x6405FA00E2EC94D4, 0xDEC681F9F4C31F31}, // 1e-187 - {0xDE83BC408DD3DD04, 0x8B3C113C38F9F37E}, // 1e-186 - {0x9624AB50B148D445, 0xAE0B158B4738705E}, // 1e-185 - {0x3BADD624DD9B0957, 0xD98DDAEE19068C76}, // 1e-184 - {0xE54CA5D70A80E5D6, 0x87F8A8D4CFA417C9}, // 1e-183 - {0x5E9FCF4CCD211F4C, 0xA9F6D30A038D1DBC}, // 1e-182 - {0x7647C3200069671F, 0xD47487CC8470652B}, // 1e-181 - {0x29ECD9F40041E073, 0x84C8D4DFD2C63F3B}, // 1e-180 - {0xF468107100525890, 0xA5FB0A17C777CF09}, // 1e-179 - {0x7182148D4066EEB4, 0xCF79CC9DB955C2CC}, // 1e-178 - {0xC6F14CD848405530, 0x81AC1FE293D599BF}, // 1e-177 - {0xB8ADA00E5A506A7C, 0xA21727DB38CB002F}, // 1e-176 - {0xA6D90811F0E4851C, 0xCA9CF1D206FDC03B}, // 1e-175 - {0x908F4A166D1DA663, 0xFD442E4688BD304A}, // 1e-174 - {0x9A598E4E043287FE, 0x9E4A9CEC15763E2E}, // 1e-173 - {0x40EFF1E1853F29FD, 0xC5DD44271AD3CDBA}, // 1e-172 - {0xD12BEE59E68EF47C, 0xF7549530E188C128}, // 1e-171 - {0x82BB74F8301958CE, 0x9A94DD3E8CF578B9}, // 1e-170 - {0xE36A52363C1FAF01, 0xC13A148E3032D6E7}, // 1e-169 - {0xDC44E6C3CB279AC1, 0xF18899B1BC3F8CA1}, // 1e-168 - {0x29AB103A5EF8C0B9, 0x96F5600F15A7B7E5}, // 1e-167 - {0x7415D448F6B6F0E7, 0xBCB2B812DB11A5DE}, // 1e-166 - {0x111B495B3464AD21, 0xEBDF661791D60F56}, // 1e-165 - {0xCAB10DD900BEEC34, 0x936B9FCEBB25C995}, // 1e-164 - {0x3D5D514F40EEA742, 0xB84687C269EF3BFB}, // 1e-163 - {0x0CB4A5A3112A5112, 0xE65829B3046B0AFA}, // 1e-162 - {0x47F0E785EABA72AB, 0x8FF71A0FE2C2E6DC}, // 1e-161 - {0x59ED216765690F56, 0xB3F4E093DB73A093}, // 1e-160 - {0x306869C13EC3532C, 0xE0F218B8D25088B8}, // 1e-159 - {0x1E414218C73A13FB, 0x8C974F7383725573}, // 1e-158 - {0xE5D1929EF90898FA, 0xAFBD2350644EEACF}, // 1e-157 - {0xDF45F746B74ABF39, 0xDBAC6C247D62A583}, // 1e-156 - {0x6B8BBA8C328EB783, 
0x894BC396CE5DA772}, // 1e-155 - {0x066EA92F3F326564, 0xAB9EB47C81F5114F}, // 1e-154 - {0xC80A537B0EFEFEBD, 0xD686619BA27255A2}, // 1e-153 - {0xBD06742CE95F5F36, 0x8613FD0145877585}, // 1e-152 - {0x2C48113823B73704, 0xA798FC4196E952E7}, // 1e-151 - {0xF75A15862CA504C5, 0xD17F3B51FCA3A7A0}, // 1e-150 - {0x9A984D73DBE722FB, 0x82EF85133DE648C4}, // 1e-149 - {0xC13E60D0D2E0EBBA, 0xA3AB66580D5FDAF5}, // 1e-148 - {0x318DF905079926A8, 0xCC963FEE10B7D1B3}, // 1e-147 - {0xFDF17746497F7052, 0xFFBBCFE994E5C61F}, // 1e-146 - {0xFEB6EA8BEDEFA633, 0x9FD561F1FD0F9BD3}, // 1e-145 - {0xFE64A52EE96B8FC0, 0xC7CABA6E7C5382C8}, // 1e-144 - {0x3DFDCE7AA3C673B0, 0xF9BD690A1B68637B}, // 1e-143 - {0x06BEA10CA65C084E, 0x9C1661A651213E2D}, // 1e-142 - {0x486E494FCFF30A62, 0xC31BFA0FE5698DB8}, // 1e-141 - {0x5A89DBA3C3EFCCFA, 0xF3E2F893DEC3F126}, // 1e-140 - {0xF89629465A75E01C, 0x986DDB5C6B3A76B7}, // 1e-139 - {0xF6BBB397F1135823, 0xBE89523386091465}, // 1e-138 - {0x746AA07DED582E2C, 0xEE2BA6C0678B597F}, // 1e-137 - {0xA8C2A44EB4571CDC, 0x94DB483840B717EF}, // 1e-136 - {0x92F34D62616CE413, 0xBA121A4650E4DDEB}, // 1e-135 - {0x77B020BAF9C81D17, 0xE896A0D7E51E1566}, // 1e-134 - {0x0ACE1474DC1D122E, 0x915E2486EF32CD60}, // 1e-133 - {0x0D819992132456BA, 0xB5B5ADA8AAFF80B8}, // 1e-132 - {0x10E1FFF697ED6C69, 0xE3231912D5BF60E6}, // 1e-131 - {0xCA8D3FFA1EF463C1, 0x8DF5EFABC5979C8F}, // 1e-130 - {0xBD308FF8A6B17CB2, 0xB1736B96B6FD83B3}, // 1e-129 - {0xAC7CB3F6D05DDBDE, 0xDDD0467C64BCE4A0}, // 1e-128 - {0x6BCDF07A423AA96B, 0x8AA22C0DBEF60EE4}, // 1e-127 - {0x86C16C98D2C953C6, 0xAD4AB7112EB3929D}, // 1e-126 - {0xE871C7BF077BA8B7, 0xD89D64D57A607744}, // 1e-125 - {0x11471CD764AD4972, 0x87625F056C7C4A8B}, // 1e-124 - {0xD598E40D3DD89BCF, 0xA93AF6C6C79B5D2D}, // 1e-123 - {0x4AFF1D108D4EC2C3, 0xD389B47879823479}, // 1e-122 - {0xCEDF722A585139BA, 0x843610CB4BF160CB}, // 1e-121 - {0xC2974EB4EE658828, 0xA54394FE1EEDB8FE}, // 1e-120 - {0x733D226229FEEA32, 0xCE947A3DA6A9273E}, // 1e-119 - {0x0806357D5A3F525F, 0x811CCC668829B887}, // 1e-118 - {0xCA07C2DCB0CF26F7, 0xA163FF802A3426A8}, // 1e-117 - {0xFC89B393DD02F0B5, 0xC9BCFF6034C13052}, // 1e-116 - {0xBBAC2078D443ACE2, 0xFC2C3F3841F17C67}, // 1e-115 - {0xD54B944B84AA4C0D, 0x9D9BA7832936EDC0}, // 1e-114 - {0x0A9E795E65D4DF11, 0xC5029163F384A931}, // 1e-113 - {0x4D4617B5FF4A16D5, 0xF64335BCF065D37D}, // 1e-112 - {0x504BCED1BF8E4E45, 0x99EA0196163FA42E}, // 1e-111 - {0xE45EC2862F71E1D6, 0xC06481FB9BCF8D39}, // 1e-110 - {0x5D767327BB4E5A4C, 0xF07DA27A82C37088}, // 1e-109 - {0x3A6A07F8D510F86F, 0x964E858C91BA2655}, // 1e-108 - {0x890489F70A55368B, 0xBBE226EFB628AFEA}, // 1e-107 - {0x2B45AC74CCEA842E, 0xEADAB0ABA3B2DBE5}, // 1e-106 - {0x3B0B8BC90012929D, 0x92C8AE6B464FC96F}, // 1e-105 - {0x09CE6EBB40173744, 0xB77ADA0617E3BBCB}, // 1e-104 - {0xCC420A6A101D0515, 0xE55990879DDCAABD}, // 1e-103 - {0x9FA946824A12232D, 0x8F57FA54C2A9EAB6}, // 1e-102 - {0x47939822DC96ABF9, 0xB32DF8E9F3546564}, // 1e-101 - {0x59787E2B93BC56F7, 0xDFF9772470297EBD}, // 1e-100 - {0x57EB4EDB3C55B65A, 0x8BFBEA76C619EF36}, // 1e-99 - {0xEDE622920B6B23F1, 0xAEFAE51477A06B03}, // 1e-98 - {0xE95FAB368E45ECED, 0xDAB99E59958885C4}, // 1e-97 - {0x11DBCB0218EBB414, 0x88B402F7FD75539B}, // 1e-96 - {0xD652BDC29F26A119, 0xAAE103B5FCD2A881}, // 1e-95 - {0x4BE76D3346F0495F, 0xD59944A37C0752A2}, // 1e-94 - {0x6F70A4400C562DDB, 0x857FCAE62D8493A5}, // 1e-93 - {0xCB4CCD500F6BB952, 0xA6DFBD9FB8E5B88E}, // 1e-92 - {0x7E2000A41346A7A7, 0xD097AD07A71F26B2}, // 1e-91 - {0x8ED400668C0C28C8, 0x825ECC24C873782F}, // 1e-90 - {0x728900802F0F32FA, 
0xA2F67F2DFA90563B}, // 1e-89 - {0x4F2B40A03AD2FFB9, 0xCBB41EF979346BCA}, // 1e-88 - {0xE2F610C84987BFA8, 0xFEA126B7D78186BC}, // 1e-87 - {0x0DD9CA7D2DF4D7C9, 0x9F24B832E6B0F436}, // 1e-86 - {0x91503D1C79720DBB, 0xC6EDE63FA05D3143}, // 1e-85 - {0x75A44C6397CE912A, 0xF8A95FCF88747D94}, // 1e-84 - {0xC986AFBE3EE11ABA, 0x9B69DBE1B548CE7C}, // 1e-83 - {0xFBE85BADCE996168, 0xC24452DA229B021B}, // 1e-82 - {0xFAE27299423FB9C3, 0xF2D56790AB41C2A2}, // 1e-81 - {0xDCCD879FC967D41A, 0x97C560BA6B0919A5}, // 1e-80 - {0x5400E987BBC1C920, 0xBDB6B8E905CB600F}, // 1e-79 - {0x290123E9AAB23B68, 0xED246723473E3813}, // 1e-78 - {0xF9A0B6720AAF6521, 0x9436C0760C86E30B}, // 1e-77 - {0xF808E40E8D5B3E69, 0xB94470938FA89BCE}, // 1e-76 - {0xB60B1D1230B20E04, 0xE7958CB87392C2C2}, // 1e-75 - {0xB1C6F22B5E6F48C2, 0x90BD77F3483BB9B9}, // 1e-74 - {0x1E38AEB6360B1AF3, 0xB4ECD5F01A4AA828}, // 1e-73 - {0x25C6DA63C38DE1B0, 0xE2280B6C20DD5232}, // 1e-72 - {0x579C487E5A38AD0E, 0x8D590723948A535F}, // 1e-71 - {0x2D835A9DF0C6D851, 0xB0AF48EC79ACE837}, // 1e-70 - {0xF8E431456CF88E65, 0xDCDB1B2798182244}, // 1e-69 - {0x1B8E9ECB641B58FF, 0x8A08F0F8BF0F156B}, // 1e-68 - {0xE272467E3D222F3F, 0xAC8B2D36EED2DAC5}, // 1e-67 - {0x5B0ED81DCC6ABB0F, 0xD7ADF884AA879177}, // 1e-66 - {0x98E947129FC2B4E9, 0x86CCBB52EA94BAEA}, // 1e-65 - {0x3F2398D747B36224, 0xA87FEA27A539E9A5}, // 1e-64 - {0x8EEC7F0D19A03AAD, 0xD29FE4B18E88640E}, // 1e-63 - {0x1953CF68300424AC, 0x83A3EEEEF9153E89}, // 1e-62 - {0x5FA8C3423C052DD7, 0xA48CEAAAB75A8E2B}, // 1e-61 - {0x3792F412CB06794D, 0xCDB02555653131B6}, // 1e-60 - {0xE2BBD88BBEE40BD0, 0x808E17555F3EBF11}, // 1e-59 - {0x5B6ACEAEAE9D0EC4, 0xA0B19D2AB70E6ED6}, // 1e-58 - {0xF245825A5A445275, 0xC8DE047564D20A8B}, // 1e-57 - {0xEED6E2F0F0D56712, 0xFB158592BE068D2E}, // 1e-56 - {0x55464DD69685606B, 0x9CED737BB6C4183D}, // 1e-55 - {0xAA97E14C3C26B886, 0xC428D05AA4751E4C}, // 1e-54 - {0xD53DD99F4B3066A8, 0xF53304714D9265DF}, // 1e-53 - {0xE546A8038EFE4029, 0x993FE2C6D07B7FAB}, // 1e-52 - {0xDE98520472BDD033, 0xBF8FDB78849A5F96}, // 1e-51 - {0x963E66858F6D4440, 0xEF73D256A5C0F77C}, // 1e-50 - {0xDDE7001379A44AA8, 0x95A8637627989AAD}, // 1e-49 - {0x5560C018580D5D52, 0xBB127C53B17EC159}, // 1e-48 - {0xAAB8F01E6E10B4A6, 0xE9D71B689DDE71AF}, // 1e-47 - {0xCAB3961304CA70E8, 0x9226712162AB070D}, // 1e-46 - {0x3D607B97C5FD0D22, 0xB6B00D69BB55C8D1}, // 1e-45 - {0x8CB89A7DB77C506A, 0xE45C10C42A2B3B05}, // 1e-44 - {0x77F3608E92ADB242, 0x8EB98A7A9A5B04E3}, // 1e-43 - {0x55F038B237591ED3, 0xB267ED1940F1C61C}, // 1e-42 - {0x6B6C46DEC52F6688, 0xDF01E85F912E37A3}, // 1e-41 - {0x2323AC4B3B3DA015, 0x8B61313BBABCE2C6}, // 1e-40 - {0xABEC975E0A0D081A, 0xAE397D8AA96C1B77}, // 1e-39 - {0x96E7BD358C904A21, 0xD9C7DCED53C72255}, // 1e-38 - {0x7E50D64177DA2E54, 0x881CEA14545C7575}, // 1e-37 - {0xDDE50BD1D5D0B9E9, 0xAA242499697392D2}, // 1e-36 - {0x955E4EC64B44E864, 0xD4AD2DBFC3D07787}, // 1e-35 - {0xBD5AF13BEF0B113E, 0x84EC3C97DA624AB4}, // 1e-34 - {0xECB1AD8AEACDD58E, 0xA6274BBDD0FADD61}, // 1e-33 - {0x67DE18EDA5814AF2, 0xCFB11EAD453994BA}, // 1e-32 - {0x80EACF948770CED7, 0x81CEB32C4B43FCF4}, // 1e-31 - {0xA1258379A94D028D, 0xA2425FF75E14FC31}, // 1e-30 - {0x096EE45813A04330, 0xCAD2F7F5359A3B3E}, // 1e-29 - {0x8BCA9D6E188853FC, 0xFD87B5F28300CA0D}, // 1e-28 - {0x775EA264CF55347D, 0x9E74D1B791E07E48}, // 1e-27 - {0x95364AFE032A819D, 0xC612062576589DDA}, // 1e-26 - {0x3A83DDBD83F52204, 0xF79687AED3EEC551}, // 1e-25 - {0xC4926A9672793542, 0x9ABE14CD44753B52}, // 1e-24 - {0x75B7053C0F178293, 0xC16D9A0095928A27}, // 1e-23 - {0x5324C68B12DD6338, 
0xF1C90080BAF72CB1}, // 1e-22 - {0xD3F6FC16EBCA5E03, 0x971DA05074DA7BEE}, // 1e-21 - {0x88F4BB1CA6BCF584, 0xBCE5086492111AEA}, // 1e-20 - {0x2B31E9E3D06C32E5, 0xEC1E4A7DB69561A5}, // 1e-19 - {0x3AFF322E62439FCF, 0x9392EE8E921D5D07}, // 1e-18 - {0x09BEFEB9FAD487C2, 0xB877AA3236A4B449}, // 1e-17 - {0x4C2EBE687989A9B3, 0xE69594BEC44DE15B}, // 1e-16 - {0x0F9D37014BF60A10, 0x901D7CF73AB0ACD9}, // 1e-15 - {0x538484C19EF38C94, 0xB424DC35095CD80F}, // 1e-14 - {0x2865A5F206B06FB9, 0xE12E13424BB40E13}, // 1e-13 - {0xF93F87B7442E45D3, 0x8CBCCC096F5088CB}, // 1e-12 - {0xF78F69A51539D748, 0xAFEBFF0BCB24AAFE}, // 1e-11 - {0xB573440E5A884D1B, 0xDBE6FECEBDEDD5BE}, // 1e-10 - {0x31680A88F8953030, 0x89705F4136B4A597}, // 1e-9 - {0xFDC20D2B36BA7C3D, 0xABCC77118461CEFC}, // 1e-8 - {0x3D32907604691B4C, 0xD6BF94D5E57A42BC}, // 1e-7 - {0xA63F9A49C2C1B10F, 0x8637BD05AF6C69B5}, // 1e-6 - {0x0FCF80DC33721D53, 0xA7C5AC471B478423}, // 1e-5 - {0xD3C36113404EA4A8, 0xD1B71758E219652B}, // 1e-4 - {0x645A1CAC083126E9, 0x83126E978D4FDF3B}, // 1e-3 - {0x3D70A3D70A3D70A3, 0xA3D70A3D70A3D70A}, // 1e-2 - {0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC}, // 1e-1 - {0x0000000000000000, 0x8000000000000000}, // 1e0 - {0x0000000000000000, 0xA000000000000000}, // 1e1 - {0x0000000000000000, 0xC800000000000000}, // 1e2 - {0x0000000000000000, 0xFA00000000000000}, // 1e3 - {0x0000000000000000, 0x9C40000000000000}, // 1e4 - {0x0000000000000000, 0xC350000000000000}, // 1e5 - {0x0000000000000000, 0xF424000000000000}, // 1e6 - {0x0000000000000000, 0x9896800000000000}, // 1e7 - {0x0000000000000000, 0xBEBC200000000000}, // 1e8 - {0x0000000000000000, 0xEE6B280000000000}, // 1e9 - {0x0000000000000000, 0x9502F90000000000}, // 1e10 - {0x0000000000000000, 0xBA43B74000000000}, // 1e11 - {0x0000000000000000, 0xE8D4A51000000000}, // 1e12 - {0x0000000000000000, 0x9184E72A00000000}, // 1e13 - {0x0000000000000000, 0xB5E620F480000000}, // 1e14 - {0x0000000000000000, 0xE35FA931A0000000}, // 1e15 - {0x0000000000000000, 0x8E1BC9BF04000000}, // 1e16 - {0x0000000000000000, 0xB1A2BC2EC5000000}, // 1e17 - {0x0000000000000000, 0xDE0B6B3A76400000}, // 1e18 - {0x0000000000000000, 0x8AC7230489E80000}, // 1e19 - {0x0000000000000000, 0xAD78EBC5AC620000}, // 1e20 - {0x0000000000000000, 0xD8D726B7177A8000}, // 1e21 - {0x0000000000000000, 0x878678326EAC9000}, // 1e22 - {0x0000000000000000, 0xA968163F0A57B400}, // 1e23 - {0x0000000000000000, 0xD3C21BCECCEDA100}, // 1e24 - {0x0000000000000000, 0x84595161401484A0}, // 1e25 - {0x0000000000000000, 0xA56FA5B99019A5C8}, // 1e26 - {0x0000000000000000, 0xCECB8F27F4200F3A}, // 1e27 - {0x4000000000000000, 0x813F3978F8940984}, // 1e28 - {0x5000000000000000, 0xA18F07D736B90BE5}, // 1e29 - {0xA400000000000000, 0xC9F2C9CD04674EDE}, // 1e30 - {0x4D00000000000000, 0xFC6F7C4045812296}, // 1e31 - {0xF020000000000000, 0x9DC5ADA82B70B59D}, // 1e32 - {0x6C28000000000000, 0xC5371912364CE305}, // 1e33 - {0xC732000000000000, 0xF684DF56C3E01BC6}, // 1e34 - {0x3C7F400000000000, 0x9A130B963A6C115C}, // 1e35 - {0x4B9F100000000000, 0xC097CE7BC90715B3}, // 1e36 - {0x1E86D40000000000, 0xF0BDC21ABB48DB20}, // 1e37 - {0x1314448000000000, 0x96769950B50D88F4}, // 1e38 - {0x17D955A000000000, 0xBC143FA4E250EB31}, // 1e39 - {0x5DCFAB0800000000, 0xEB194F8E1AE525FD}, // 1e40 - {0x5AA1CAE500000000, 0x92EFD1B8D0CF37BE}, // 1e41 - {0xF14A3D9E40000000, 0xB7ABC627050305AD}, // 1e42 - {0x6D9CCD05D0000000, 0xE596B7B0C643C719}, // 1e43 - {0xE4820023A2000000, 0x8F7E32CE7BEA5C6F}, // 1e44 - {0xDDA2802C8A800000, 0xB35DBF821AE4F38B}, // 1e45 - {0xD50B2037AD200000, 
0xE0352F62A19E306E}, // 1e46 - {0x4526F422CC340000, 0x8C213D9DA502DE45}, // 1e47 - {0x9670B12B7F410000, 0xAF298D050E4395D6}, // 1e48 - {0x3C0CDD765F114000, 0xDAF3F04651D47B4C}, // 1e49 - {0xA5880A69FB6AC800, 0x88D8762BF324CD0F}, // 1e50 - {0x8EEA0D047A457A00, 0xAB0E93B6EFEE0053}, // 1e51 - {0x72A4904598D6D880, 0xD5D238A4ABE98068}, // 1e52 - {0x47A6DA2B7F864750, 0x85A36366EB71F041}, // 1e53 - {0x999090B65F67D924, 0xA70C3C40A64E6C51}, // 1e54 - {0xFFF4B4E3F741CF6D, 0xD0CF4B50CFE20765}, // 1e55 - {0xBFF8F10E7A8921A4, 0x82818F1281ED449F}, // 1e56 - {0xAFF72D52192B6A0D, 0xA321F2D7226895C7}, // 1e57 - {0x9BF4F8A69F764490, 0xCBEA6F8CEB02BB39}, // 1e58 - {0x02F236D04753D5B4, 0xFEE50B7025C36A08}, // 1e59 - {0x01D762422C946590, 0x9F4F2726179A2245}, // 1e60 - {0x424D3AD2B7B97EF5, 0xC722F0EF9D80AAD6}, // 1e61 - {0xD2E0898765A7DEB2, 0xF8EBAD2B84E0D58B}, // 1e62 - {0x63CC55F49F88EB2F, 0x9B934C3B330C8577}, // 1e63 - {0x3CBF6B71C76B25FB, 0xC2781F49FFCFA6D5}, // 1e64 - {0x8BEF464E3945EF7A, 0xF316271C7FC3908A}, // 1e65 - {0x97758BF0E3CBB5AC, 0x97EDD871CFDA3A56}, // 1e66 - {0x3D52EEED1CBEA317, 0xBDE94E8E43D0C8EC}, // 1e67 - {0x4CA7AAA863EE4BDD, 0xED63A231D4C4FB27}, // 1e68 - {0x8FE8CAA93E74EF6A, 0x945E455F24FB1CF8}, // 1e69 - {0xB3E2FD538E122B44, 0xB975D6B6EE39E436}, // 1e70 - {0x60DBBCA87196B616, 0xE7D34C64A9C85D44}, // 1e71 - {0xBC8955E946FE31CD, 0x90E40FBEEA1D3A4A}, // 1e72 - {0x6BABAB6398BDBE41, 0xB51D13AEA4A488DD}, // 1e73 - {0xC696963C7EED2DD1, 0xE264589A4DCDAB14}, // 1e74 - {0xFC1E1DE5CF543CA2, 0x8D7EB76070A08AEC}, // 1e75 - {0x3B25A55F43294BCB, 0xB0DE65388CC8ADA8}, // 1e76 - {0x49EF0EB713F39EBE, 0xDD15FE86AFFAD912}, // 1e77 - {0x6E3569326C784337, 0x8A2DBF142DFCC7AB}, // 1e78 - {0x49C2C37F07965404, 0xACB92ED9397BF996}, // 1e79 - {0xDC33745EC97BE906, 0xD7E77A8F87DAF7FB}, // 1e80 - {0x69A028BB3DED71A3, 0x86F0AC99B4E8DAFD}, // 1e81 - {0xC40832EA0D68CE0C, 0xA8ACD7C0222311BC}, // 1e82 - {0xF50A3FA490C30190, 0xD2D80DB02AABD62B}, // 1e83 - {0x792667C6DA79E0FA, 0x83C7088E1AAB65DB}, // 1e84 - {0x577001B891185938, 0xA4B8CAB1A1563F52}, // 1e85 - {0xED4C0226B55E6F86, 0xCDE6FD5E09ABCF26}, // 1e86 - {0x544F8158315B05B4, 0x80B05E5AC60B6178}, // 1e87 - {0x696361AE3DB1C721, 0xA0DC75F1778E39D6}, // 1e88 - {0x03BC3A19CD1E38E9, 0xC913936DD571C84C}, // 1e89 - {0x04AB48A04065C723, 0xFB5878494ACE3A5F}, // 1e90 - {0x62EB0D64283F9C76, 0x9D174B2DCEC0E47B}, // 1e91 - {0x3BA5D0BD324F8394, 0xC45D1DF942711D9A}, // 1e92 - {0xCA8F44EC7EE36479, 0xF5746577930D6500}, // 1e93 - {0x7E998B13CF4E1ECB, 0x9968BF6ABBE85F20}, // 1e94 - {0x9E3FEDD8C321A67E, 0xBFC2EF456AE276E8}, // 1e95 - {0xC5CFE94EF3EA101E, 0xEFB3AB16C59B14A2}, // 1e96 - {0xBBA1F1D158724A12, 0x95D04AEE3B80ECE5}, // 1e97 - {0x2A8A6E45AE8EDC97, 0xBB445DA9CA61281F}, // 1e98 - {0xF52D09D71A3293BD, 0xEA1575143CF97226}, // 1e99 - {0x593C2626705F9C56, 0x924D692CA61BE758}, // 1e100 - {0x6F8B2FB00C77836C, 0xB6E0C377CFA2E12E}, // 1e101 - {0x0B6DFB9C0F956447, 0xE498F455C38B997A}, // 1e102 - {0x4724BD4189BD5EAC, 0x8EDF98B59A373FEC}, // 1e103 - {0x58EDEC91EC2CB657, 0xB2977EE300C50FE7}, // 1e104 - {0x2F2967B66737E3ED, 0xDF3D5E9BC0F653E1}, // 1e105 - {0xBD79E0D20082EE74, 0x8B865B215899F46C}, // 1e106 - {0xECD8590680A3AA11, 0xAE67F1E9AEC07187}, // 1e107 - {0xE80E6F4820CC9495, 0xDA01EE641A708DE9}, // 1e108 - {0x3109058D147FDCDD, 0x884134FE908658B2}, // 1e109 - {0xBD4B46F0599FD415, 0xAA51823E34A7EEDE}, // 1e110 - {0x6C9E18AC7007C91A, 0xD4E5E2CDC1D1EA96}, // 1e111 - {0x03E2CF6BC604DDB0, 0x850FADC09923329E}, // 1e112 - {0x84DB8346B786151C, 0xA6539930BF6BFF45}, // 1e113 - {0xE612641865679A63, 
0xCFE87F7CEF46FF16}, // 1e114 - {0x4FCB7E8F3F60C07E, 0x81F14FAE158C5F6E}, // 1e115 - {0xE3BE5E330F38F09D, 0xA26DA3999AEF7749}, // 1e116 - {0x5CADF5BFD3072CC5, 0xCB090C8001AB551C}, // 1e117 - {0x73D9732FC7C8F7F6, 0xFDCB4FA002162A63}, // 1e118 - {0x2867E7FDDCDD9AFA, 0x9E9F11C4014DDA7E}, // 1e119 - {0xB281E1FD541501B8, 0xC646D63501A1511D}, // 1e120 - {0x1F225A7CA91A4226, 0xF7D88BC24209A565}, // 1e121 - {0x3375788DE9B06958, 0x9AE757596946075F}, // 1e122 - {0x0052D6B1641C83AE, 0xC1A12D2FC3978937}, // 1e123 - {0xC0678C5DBD23A49A, 0xF209787BB47D6B84}, // 1e124 - {0xF840B7BA963646E0, 0x9745EB4D50CE6332}, // 1e125 - {0xB650E5A93BC3D898, 0xBD176620A501FBFF}, // 1e126 - {0xA3E51F138AB4CEBE, 0xEC5D3FA8CE427AFF}, // 1e127 - {0xC66F336C36B10137, 0x93BA47C980E98CDF}, // 1e128 - {0xB80B0047445D4184, 0xB8A8D9BBE123F017}, // 1e129 - {0xA60DC059157491E5, 0xE6D3102AD96CEC1D}, // 1e130 - {0x87C89837AD68DB2F, 0x9043EA1AC7E41392}, // 1e131 - {0x29BABE4598C311FB, 0xB454E4A179DD1877}, // 1e132 - {0xF4296DD6FEF3D67A, 0xE16A1DC9D8545E94}, // 1e133 - {0x1899E4A65F58660C, 0x8CE2529E2734BB1D}, // 1e134 - {0x5EC05DCFF72E7F8F, 0xB01AE745B101E9E4}, // 1e135 - {0x76707543F4FA1F73, 0xDC21A1171D42645D}, // 1e136 - {0x6A06494A791C53A8, 0x899504AE72497EBA}, // 1e137 - {0x0487DB9D17636892, 0xABFA45DA0EDBDE69}, // 1e138 - {0x45A9D2845D3C42B6, 0xD6F8D7509292D603}, // 1e139 - {0x0B8A2392BA45A9B2, 0x865B86925B9BC5C2}, // 1e140 - {0x8E6CAC7768D7141E, 0xA7F26836F282B732}, // 1e141 - {0x3207D795430CD926, 0xD1EF0244AF2364FF}, // 1e142 - {0x7F44E6BD49E807B8, 0x8335616AED761F1F}, // 1e143 - {0x5F16206C9C6209A6, 0xA402B9C5A8D3A6E7}, // 1e144 - {0x36DBA887C37A8C0F, 0xCD036837130890A1}, // 1e145 - {0xC2494954DA2C9789, 0x802221226BE55A64}, // 1e146 - {0xF2DB9BAA10B7BD6C, 0xA02AA96B06DEB0FD}, // 1e147 - {0x6F92829494E5ACC7, 0xC83553C5C8965D3D}, // 1e148 - {0xCB772339BA1F17F9, 0xFA42A8B73ABBF48C}, // 1e149 - {0xFF2A760414536EFB, 0x9C69A97284B578D7}, // 1e150 - {0xFEF5138519684ABA, 0xC38413CF25E2D70D}, // 1e151 - {0x7EB258665FC25D69, 0xF46518C2EF5B8CD1}, // 1e152 - {0xEF2F773FFBD97A61, 0x98BF2F79D5993802}, // 1e153 - {0xAAFB550FFACFD8FA, 0xBEEEFB584AFF8603}, // 1e154 - {0x95BA2A53F983CF38, 0xEEAABA2E5DBF6784}, // 1e155 - {0xDD945A747BF26183, 0x952AB45CFA97A0B2}, // 1e156 - {0x94F971119AEEF9E4, 0xBA756174393D88DF}, // 1e157 - {0x7A37CD5601AAB85D, 0xE912B9D1478CEB17}, // 1e158 - {0xAC62E055C10AB33A, 0x91ABB422CCB812EE}, // 1e159 - {0x577B986B314D6009, 0xB616A12B7FE617AA}, // 1e160 - {0xED5A7E85FDA0B80B, 0xE39C49765FDF9D94}, // 1e161 - {0x14588F13BE847307, 0x8E41ADE9FBEBC27D}, // 1e162 - {0x596EB2D8AE258FC8, 0xB1D219647AE6B31C}, // 1e163 - {0x6FCA5F8ED9AEF3BB, 0xDE469FBD99A05FE3}, // 1e164 - {0x25DE7BB9480D5854, 0x8AEC23D680043BEE}, // 1e165 - {0xAF561AA79A10AE6A, 0xADA72CCC20054AE9}, // 1e166 - {0x1B2BA1518094DA04, 0xD910F7FF28069DA4}, // 1e167 - {0x90FB44D2F05D0842, 0x87AA9AFF79042286}, // 1e168 - {0x353A1607AC744A53, 0xA99541BF57452B28}, // 1e169 - {0x42889B8997915CE8, 0xD3FA922F2D1675F2}, // 1e170 - {0x69956135FEBADA11, 0x847C9B5D7C2E09B7}, // 1e171 - {0x43FAB9837E699095, 0xA59BC234DB398C25}, // 1e172 - {0x94F967E45E03F4BB, 0xCF02B2C21207EF2E}, // 1e173 - {0x1D1BE0EEBAC278F5, 0x8161AFB94B44F57D}, // 1e174 - {0x6462D92A69731732, 0xA1BA1BA79E1632DC}, // 1e175 - {0x7D7B8F7503CFDCFE, 0xCA28A291859BBF93}, // 1e176 - {0x5CDA735244C3D43E, 0xFCB2CB35E702AF78}, // 1e177 - {0x3A0888136AFA64A7, 0x9DEFBF01B061ADAB}, // 1e178 - {0x088AAA1845B8FDD0, 0xC56BAEC21C7A1916}, // 1e179 - {0x8AAD549E57273D45, 0xF6C69A72A3989F5B}, // 1e180 - {0x36AC54E2F678864B, 
0x9A3C2087A63F6399}, // 1e181 - {0x84576A1BB416A7DD, 0xC0CB28A98FCF3C7F}, // 1e182 - {0x656D44A2A11C51D5, 0xF0FDF2D3F3C30B9F}, // 1e183 - {0x9F644AE5A4B1B325, 0x969EB7C47859E743}, // 1e184 - {0x873D5D9F0DDE1FEE, 0xBC4665B596706114}, // 1e185 - {0xA90CB506D155A7EA, 0xEB57FF22FC0C7959}, // 1e186 - {0x09A7F12442D588F2, 0x9316FF75DD87CBD8}, // 1e187 - {0x0C11ED6D538AEB2F, 0xB7DCBF5354E9BECE}, // 1e188 - {0x8F1668C8A86DA5FA, 0xE5D3EF282A242E81}, // 1e189 - {0xF96E017D694487BC, 0x8FA475791A569D10}, // 1e190 - {0x37C981DCC395A9AC, 0xB38D92D760EC4455}, // 1e191 - {0x85BBE253F47B1417, 0xE070F78D3927556A}, // 1e192 - {0x93956D7478CCEC8E, 0x8C469AB843B89562}, // 1e193 - {0x387AC8D1970027B2, 0xAF58416654A6BABB}, // 1e194 - {0x06997B05FCC0319E, 0xDB2E51BFE9D0696A}, // 1e195 - {0x441FECE3BDF81F03, 0x88FCF317F22241E2}, // 1e196 - {0xD527E81CAD7626C3, 0xAB3C2FDDEEAAD25A}, // 1e197 - {0x8A71E223D8D3B074, 0xD60B3BD56A5586F1}, // 1e198 - {0xF6872D5667844E49, 0x85C7056562757456}, // 1e199 - {0xB428F8AC016561DB, 0xA738C6BEBB12D16C}, // 1e200 - {0xE13336D701BEBA52, 0xD106F86E69D785C7}, // 1e201 - {0xECC0024661173473, 0x82A45B450226B39C}, // 1e202 - {0x27F002D7F95D0190, 0xA34D721642B06084}, // 1e203 - {0x31EC038DF7B441F4, 0xCC20CE9BD35C78A5}, // 1e204 - {0x7E67047175A15271, 0xFF290242C83396CE}, // 1e205 - {0x0F0062C6E984D386, 0x9F79A169BD203E41}, // 1e206 - {0x52C07B78A3E60868, 0xC75809C42C684DD1}, // 1e207 - {0xA7709A56CCDF8A82, 0xF92E0C3537826145}, // 1e208 - {0x88A66076400BB691, 0x9BBCC7A142B17CCB}, // 1e209 - {0x6ACFF893D00EA435, 0xC2ABF989935DDBFE}, // 1e210 - {0x0583F6B8C4124D43, 0xF356F7EBF83552FE}, // 1e211 - {0xC3727A337A8B704A, 0x98165AF37B2153DE}, // 1e212 - {0x744F18C0592E4C5C, 0xBE1BF1B059E9A8D6}, // 1e213 - {0x1162DEF06F79DF73, 0xEDA2EE1C7064130C}, // 1e214 - {0x8ADDCB5645AC2BA8, 0x9485D4D1C63E8BE7}, // 1e215 - {0x6D953E2BD7173692, 0xB9A74A0637CE2EE1}, // 1e216 - {0xC8FA8DB6CCDD0437, 0xE8111C87C5C1BA99}, // 1e217 - {0x1D9C9892400A22A2, 0x910AB1D4DB9914A0}, // 1e218 - {0x2503BEB6D00CAB4B, 0xB54D5E4A127F59C8}, // 1e219 - {0x2E44AE64840FD61D, 0xE2A0B5DC971F303A}, // 1e220 - {0x5CEAECFED289E5D2, 0x8DA471A9DE737E24}, // 1e221 - {0x7425A83E872C5F47, 0xB10D8E1456105DAD}, // 1e222 - {0xD12F124E28F77719, 0xDD50F1996B947518}, // 1e223 - {0x82BD6B70D99AAA6F, 0x8A5296FFE33CC92F}, // 1e224 - {0x636CC64D1001550B, 0xACE73CBFDC0BFB7B}, // 1e225 - {0x3C47F7E05401AA4E, 0xD8210BEFD30EFA5A}, // 1e226 - {0x65ACFAEC34810A71, 0x8714A775E3E95C78}, // 1e227 - {0x7F1839A741A14D0D, 0xA8D9D1535CE3B396}, // 1e228 - {0x1EDE48111209A050, 0xD31045A8341CA07C}, // 1e229 - {0x934AED0AAB460432, 0x83EA2B892091E44D}, // 1e230 - {0xF81DA84D5617853F, 0xA4E4B66B68B65D60}, // 1e231 - {0x36251260AB9D668E, 0xCE1DE40642E3F4B9}, // 1e232 - {0xC1D72B7C6B426019, 0x80D2AE83E9CE78F3}, // 1e233 - {0xB24CF65B8612F81F, 0xA1075A24E4421730}, // 1e234 - {0xDEE033F26797B627, 0xC94930AE1D529CFC}, // 1e235 - {0x169840EF017DA3B1, 0xFB9B7CD9A4A7443C}, // 1e236 - {0x8E1F289560EE864E, 0x9D412E0806E88AA5}, // 1e237 - {0xF1A6F2BAB92A27E2, 0xC491798A08A2AD4E}, // 1e238 - {0xAE10AF696774B1DB, 0xF5B5D7EC8ACB58A2}, // 1e239 - {0xACCA6DA1E0A8EF29, 0x9991A6F3D6BF1765}, // 1e240 - {0x17FD090A58D32AF3, 0xBFF610B0CC6EDD3F}, // 1e241 - {0xDDFC4B4CEF07F5B0, 0xEFF394DCFF8A948E}, // 1e242 - {0x4ABDAF101564F98E, 0x95F83D0A1FB69CD9}, // 1e243 - {0x9D6D1AD41ABE37F1, 0xBB764C4CA7A4440F}, // 1e244 - {0x84C86189216DC5ED, 0xEA53DF5FD18D5513}, // 1e245 - {0x32FD3CF5B4E49BB4, 0x92746B9BE2F8552C}, // 1e246 - {0x3FBC8C33221DC2A1, 0xB7118682DBB66A77}, // 1e247 - {0x0FABAF3FEAA5334A, 
0xE4D5E82392A40515}, // 1e248 - {0x29CB4D87F2A7400E, 0x8F05B1163BA6832D}, // 1e249 - {0x743E20E9EF511012, 0xB2C71D5BCA9023F8}, // 1e250 - {0x914DA9246B255416, 0xDF78E4B2BD342CF6}, // 1e251 - {0x1AD089B6C2F7548E, 0x8BAB8EEFB6409C1A}, // 1e252 - {0xA184AC2473B529B1, 0xAE9672ABA3D0C320}, // 1e253 - {0xC9E5D72D90A2741E, 0xDA3C0F568CC4F3E8}, // 1e254 - {0x7E2FA67C7A658892, 0x8865899617FB1871}, // 1e255 - {0xDDBB901B98FEEAB7, 0xAA7EEBFB9DF9DE8D}, // 1e256 - {0x552A74227F3EA565, 0xD51EA6FA85785631}, // 1e257 - {0xD53A88958F87275F, 0x8533285C936B35DE}, // 1e258 - {0x8A892ABAF368F137, 0xA67FF273B8460356}, // 1e259 - {0x2D2B7569B0432D85, 0xD01FEF10A657842C}, // 1e260 - {0x9C3B29620E29FC73, 0x8213F56A67F6B29B}, // 1e261 - {0x8349F3BA91B47B8F, 0xA298F2C501F45F42}, // 1e262 - {0x241C70A936219A73, 0xCB3F2F7642717713}, // 1e263 - {0xED238CD383AA0110, 0xFE0EFB53D30DD4D7}, // 1e264 - {0xF4363804324A40AA, 0x9EC95D1463E8A506}, // 1e265 - {0xB143C6053EDCD0D5, 0xC67BB4597CE2CE48}, // 1e266 - {0xDD94B7868E94050A, 0xF81AA16FDC1B81DA}, // 1e267 - {0xCA7CF2B4191C8326, 0x9B10A4E5E9913128}, // 1e268 - {0xFD1C2F611F63A3F0, 0xC1D4CE1F63F57D72}, // 1e269 - {0xBC633B39673C8CEC, 0xF24A01A73CF2DCCF}, // 1e270 - {0xD5BE0503E085D813, 0x976E41088617CA01}, // 1e271 - {0x4B2D8644D8A74E18, 0xBD49D14AA79DBC82}, // 1e272 - {0xDDF8E7D60ED1219E, 0xEC9C459D51852BA2}, // 1e273 - {0xCABB90E5C942B503, 0x93E1AB8252F33B45}, // 1e274 - {0x3D6A751F3B936243, 0xB8DA1662E7B00A17}, // 1e275 - {0x0CC512670A783AD4, 0xE7109BFBA19C0C9D}, // 1e276 - {0x27FB2B80668B24C5, 0x906A617D450187E2}, // 1e277 - {0xB1F9F660802DEDF6, 0xB484F9DC9641E9DA}, // 1e278 - {0x5E7873F8A0396973, 0xE1A63853BBD26451}, // 1e279 - {0xDB0B487B6423E1E8, 0x8D07E33455637EB2}, // 1e280 - {0x91CE1A9A3D2CDA62, 0xB049DC016ABC5E5F}, // 1e281 - {0x7641A140CC7810FB, 0xDC5C5301C56B75F7}, // 1e282 - {0xA9E904C87FCB0A9D, 0x89B9B3E11B6329BA}, // 1e283 - {0x546345FA9FBDCD44, 0xAC2820D9623BF429}, // 1e284 - {0xA97C177947AD4095, 0xD732290FBACAF133}, // 1e285 - {0x49ED8EABCCCC485D, 0x867F59A9D4BED6C0}, // 1e286 - {0x5C68F256BFFF5A74, 0xA81F301449EE8C70}, // 1e287 - {0x73832EEC6FFF3111, 0xD226FC195C6A2F8C}, // 1e288 - {0xC831FD53C5FF7EAB, 0x83585D8FD9C25DB7}, // 1e289 - {0xBA3E7CA8B77F5E55, 0xA42E74F3D032F525}, // 1e290 - {0x28CE1BD2E55F35EB, 0xCD3A1230C43FB26F}, // 1e291 - {0x7980D163CF5B81B3, 0x80444B5E7AA7CF85}, // 1e292 - {0xD7E105BCC332621F, 0xA0555E361951C366}, // 1e293 - {0x8DD9472BF3FEFAA7, 0xC86AB5C39FA63440}, // 1e294 - {0xB14F98F6F0FEB951, 0xFA856334878FC150}, // 1e295 - {0x6ED1BF9A569F33D3, 0x9C935E00D4B9D8D2}, // 1e296 - {0x0A862F80EC4700C8, 0xC3B8358109E84F07}, // 1e297 - {0xCD27BB612758C0FA, 0xF4A642E14C6262C8}, // 1e298 - {0x8038D51CB897789C, 0x98E7E9CCCFBD7DBD}, // 1e299 - {0xE0470A63E6BD56C3, 0xBF21E44003ACDD2C}, // 1e300 - {0x1858CCFCE06CAC74, 0xEEEA5D5004981478}, // 1e301 - {0x0F37801E0C43EBC8, 0x95527A5202DF0CCB}, // 1e302 - {0xD30560258F54E6BA, 0xBAA718E68396CFFD}, // 1e303 - {0x47C6B82EF32A2069, 0xE950DF20247C83FD}, // 1e304 - {0x4CDC331D57FA5441, 0x91D28B7416CDD27E}, // 1e305 - {0xE0133FE4ADF8E952, 0xB6472E511C81471D}, // 1e306 - {0x58180FDDD97723A6, 0xE3D8F9E563A198E5}, // 1e307 - {0x570F09EAA7EA7648, 0x8E679C2F5E44FF8F}, // 1e308 - {0x2CD2CC6551E513DA, 0xB201833B35D63F73}, // 1e309 - {0xF8077F7EA65E58D1, 0xDE81E40A034BCF4F}, // 1e310 - {0xFB04AFAF27FAF782, 0x8B112E86420F6191}, // 1e311 - {0x79C5DB9AF1F9B563, 0xADD57A27D29339F6}, // 1e312 - {0x18375281AE7822BC, 0xD94AD8B1C7380874}, // 1e313 - {0x8F2293910D0B15B5, 0x87CEC76F1C830548}, // 1e314 - {0xB2EB3875504DDB22, 
0xA9C2794AE3A3C69A}, // 1e315 - {0x5FA60692A46151EB, 0xD433179D9C8CB841}, // 1e316 - {0xDBC7C41BA6BCD333, 0x849FEEC281D7F328}, // 1e317 - {0x12B9B522906C0800, 0xA5C7EA73224DEFF3}, // 1e318 - {0xD768226B34870A00, 0xCF39E50FEAE16BEF}, // 1e319 - {0xE6A1158300D46640, 0x81842F29F2CCE375}, // 1e320 - {0x60495AE3C1097FD0, 0xA1E53AF46F801C53}, // 1e321 - {0x385BB19CB14BDFC4, 0xCA5E89B18B602368}, // 1e322 - {0x46729E03DD9ED7B5, 0xFCF62C1DEE382C42}, // 1e323 - {0x6C07A2C26A8346D1, 0x9E19DB92B4E31BA9}, // 1e324 - {0xC7098B7305241885, 0xC5A05277621BE293}, // 1e325 - {0xB8CBEE4FC66D1EA7, 0xF70867153AA2DB38}, // 1e326 - {0x737F74F1DC043328, 0x9A65406D44A5C903}, // 1e327 - {0x505F522E53053FF2, 0xC0FE908895CF3B44}, // 1e328 - {0x647726B9E7C68FEF, 0xF13E34AABB430A15}, // 1e329 - {0x5ECA783430DC19F5, 0x96C6E0EAB509E64D}, // 1e330 - {0xB67D16413D132072, 0xBC789925624C5FE0}, // 1e331 - {0xE41C5BD18C57E88F, 0xEB96BF6EBADF77D8}, // 1e332 - {0x8E91B962F7B6F159, 0x933E37A534CBAAE7}, // 1e333 - {0x723627BBB5A4ADB0, 0xB80DC58E81FE95A1}, // 1e334 - {0xCEC3B1AAA30DD91C, 0xE61136F2227E3B09}, // 1e335 - {0x213A4F0AA5E8A7B1, 0x8FCAC257558EE4E6}, // 1e336 - {0xA988E2CD4F62D19D, 0xB3BD72ED2AF29E1F}, // 1e337 - {0x93EB1B80A33B8605, 0xE0ACCFA875AF45A7}, // 1e338 - {0xBC72F130660533C3, 0x8C6C01C9498D8B88}, // 1e339 - {0xEB8FAD7C7F8680B4, 0xAF87023B9BF0EE6A}, // 1e340 - {0xA67398DB9F6820E1, 0xDB68C2CA82ED2A05}, // 1e341 - {0x88083F8943A1148C, 0x892179BE91D43A43}, // 1e342 - {0x6A0A4F6B948959B0, 0xAB69D82E364948D4}, // 1e343 - {0x848CE34679ABB01C, 0xD6444E39C3DB9B09}, // 1e344 - {0xF2D80E0C0C0B4E11, 0x85EAB0E41A6940E5}, // 1e345 - {0x6F8E118F0F0E2195, 0xA7655D1D2103911F}, // 1e346 - {0x4B7195F2D2D1A9FB, 0xD13EB46469447567}, // 1e347 -} diff --git a/src/strconv/export_test.go b/src/strconv/export_test.go index 8c03a7ffb4f..7a3c761e683 100644 --- a/src/strconv/export_test.go +++ b/src/strconv/export_test.go @@ -5,6 +5,6 @@ package strconv var ( - BitSizeError = bitSizeError BaseError = baseError + BitSizeError = bitSizeError ) diff --git a/src/strconv/ftoaryu.go b/src/strconv/ftoaryu.go deleted file mode 100644 index 2e7bf71df0b..00000000000 --- a/src/strconv/ftoaryu.go +++ /dev/null @@ -1,569 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package strconv - -import ( - "math/bits" -) - -// binary to decimal conversion using the Ryū algorithm. -// -// See Ulf Adams, "Ryū: Fast Float-to-String Conversion" (doi:10.1145/3192366.3192369) -// -// Fixed precision formatting is a variant of the original paper's -// algorithm, where a single multiplication by 10^k is required, -// sharing the same rounding guarantees. - -// ryuFtoaFixed32 formats mant*(2^exp) with prec decimal digits. -func ryuFtoaFixed32(d *decimalSlice, mant uint32, exp int, prec int) { - if prec < 0 { - panic("ryuFtoaFixed32 called with negative prec") - } - if prec > 9 { - panic("ryuFtoaFixed32 called with prec > 9") - } - // Zero input. - if mant == 0 { - d.nd, d.dp = 0, 0 - return - } - // Renormalize to a 25-bit mantissa. 
-    e2 := exp
-    if b := bits.Len32(mant); b < 25 {
-        mant <<= uint(25 - b)
-        e2 += b - 25
-    }
-    // Choose an exponent such that rounded mant*(2^e2)*(10^q) has
-    // at least prec decimal digits, i.e.
-    //     mant*(2^e2)*(10^q) >= 10^(prec-1)
-    // Because mant >= 2^24, it is enough to choose:
-    //     2^(e2+24) >= 10^(-q+prec-1)
-    // or q = -mulByLog2Log10(e2+24) + prec - 1
-    q := -mulByLog2Log10(e2+24) + prec - 1
-
-    // Now compute mant*(2^e2)*(10^q).
-    // Is it an exact computation?
-    // Only small positive powers of 10 are exact (5^28 has 66 bits).
-    exact := q <= 27 && q >= 0
-
-    di, dexp2, d0 := mult64bitPow10(mant, e2, q)
-    if dexp2 >= 0 {
-        panic("not enough significant bits after mult64bitPow10")
-    }
-    // As a special case, computation might still be exact, if exponent
-    // was negative and if it amounts to computing an exact division.
-    // In that case, we ignore all lower bits.
-    // Note that division by 10^11 cannot be exact as 5^11 has 26 bits.
-    if q < 0 && q >= -10 && divisibleByPower5(uint64(mant), -q) {
-        exact = true
-        d0 = true
-    }
-    // Remove extra lower bits and keep rounding info.
-    extra := uint(-dexp2)
-    extraMask := uint32(1<<extra - 1)
-    di, dfrac := di>>extra, di&extraMask
-    roundUp := false
-    if exact {
-        // If we computed an exact product, d + 1/2
-        // should round to d+1 if 'd' is odd.
-        roundUp = dfrac > 1<<(extra-1) ||
-            (dfrac == 1<<(extra-1) && !d0) ||
-            (dfrac == 1<<(extra-1) && d0 && di&1 == 1)
-    } else {
-        // otherwise, d+1/2 always rounds up because
-        // we truncated below.
-        roundUp = dfrac>>(extra-1) == 1
-    }
-    if dfrac != 0 {
-        d0 = false
-    }
-    // Proceed to the requested number of digits
-    formatDecimal(d, uint64(di), !d0, roundUp, prec)
-    // Adjust exponent
-    d.dp -= q
-}
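The exponent choice above is the heart of the fixed-precision variant: after renormalization mant >= 2^24, so picking q with 2^(e2+24) >= 10^(prec-1-q) guarantees the scaled product carries at least prec decimal digits. A standalone sketch of that bound, reusing the deleted helper's fixed-point constant (illustration only, not part of the package; float math is used purely to display the scaled value):

```go
package main

import (
	"fmt"
	"math"
)

// mulByLog2Log10 mirrors the deleted helper: floor(x * log(2)/log(10)).
func mulByLog2Log10(x int) int {
	return (x * 78913) >> 18
}

func main() {
	// A renormalized 25-bit mantissa (mant >= 1<<24) and a sample exponent.
	mant, e2, prec := uint64(1)<<24, -30, 9
	q := -mulByLog2Log10(e2+24) + prec - 1

	// mant * 2^e2 * 10^q should be at least 10^(prec-1).
	v := float64(mant) * math.Pow(2, float64(e2)) * math.Pow(10, float64(q))
	fmt.Printf("q=%d, scaled ≈ %.0f, >= 1e%d: %v\n",
		q, v, prec-1, v >= math.Pow(10, float64(prec-1)))
}
```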
-
-// ryuFtoaFixed64 formats mant*(2^exp) with prec decimal digits.
-func ryuFtoaFixed64(d *decimalSlice, mant uint64, exp int, prec int) {
-    if prec > 18 {
-        panic("ryuFtoaFixed64 called with prec > 18")
-    }
-    // Zero input.
-    if mant == 0 {
-        d.nd, d.dp = 0, 0
-        return
-    }
-    // Renormalize to a 55-bit mantissa.
-    e2 := exp
-    if b := bits.Len64(mant); b < 55 {
-        mant = mant << uint(55-b)
-        e2 += b - 55
-    }
-    // Choose an exponent such that rounded mant*(2^e2)*(10^q) has
-    // at least prec decimal digits, i.e.
-    //     mant*(2^e2)*(10^q) >= 10^(prec-1)
-    // Because mant >= 2^54, it is enough to choose:
-    //     2^(e2+54) >= 10^(-q+prec-1)
-    // or q = -mulByLog2Log10(e2+54) + prec - 1
-    //
-    // The minimal required exponent is -mulByLog2Log10(1025)+18 = -291
-    // The maximal required exponent is mulByLog2Log10(1074)+18 = 342
-    q := -mulByLog2Log10(e2+54) + prec - 1
-
-    // Now compute mant*(2^e2)*(10^q).
-    // Is it an exact computation?
-    // Only small positive powers of 10 are exact (5^55 has 128 bits).
-    exact := q <= 55 && q >= 0
-
-    di, dexp2, d0 := mult128bitPow10(mant, e2, q)
-    if dexp2 >= 0 {
-        panic("not enough significant bits after mult128bitPow10")
-    }
-    // As a special case, computation might still be exact, if exponent
-    // was negative and if it amounts to computing an exact division.
-    // In that case, we ignore all lower bits.
-    // Note that division by 10^23 cannot be exact as 5^23 has 54 bits.
-    if q < 0 && q >= -22 && divisibleByPower5(mant, -q) {
-        exact = true
-        d0 = true
-    }
-    // Remove extra lower bits and keep rounding info.
-    extra := uint(-dexp2)
-    extraMask := uint64(1<<extra - 1)
-    di, dfrac := di>>extra, di&extraMask
-    roundUp := false
-    if exact {
-        // If we computed an exact product, d + 1/2
-        // should round to d+1 if 'd' is odd.
-        roundUp = dfrac > 1<<(extra-1) ||
-            (dfrac == 1<<(extra-1) && !d0) ||
-            (dfrac == 1<<(extra-1) && d0 && di&1 == 1)
-    } else {
-        // otherwise, d+1/2 always rounds up because
-        // we truncated below.
-        roundUp = dfrac>>(extra-1) == 1
-    }
-    if dfrac != 0 {
-        d0 = false
-    }
-    // Proceed to the requested number of digits
-    formatDecimal(d, di, !d0, roundUp, prec)
-    // Adjust exponent
-    d.dp -= q
-}
-
-var uint64pow10 = [...]uint64{
-    1, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
-    1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
-}
-
-// formatDecimal fills d with at most prec decimal digits
-// of mantissa m. The boolean trunc indicates whether m
-// is truncated compared to the original number being formatted.
-func formatDecimal(d *decimalSlice, m uint64, trunc bool, roundUp bool, prec int) {
-    max := uint64pow10[prec]
-    trimmed := 0
-    for m >= max {
-        a, b := m/10, m%10
-        m = a
-        trimmed++
-        if b > 5 {
-            roundUp = true
-        } else if b < 5 {
-            roundUp = false
-        } else { // b == 5
-            // round up if there are trailing digits,
-            // or if the new value of m is odd (round-to-even convention)
-            roundUp = trunc || m&1 == 1
-        }
-        if b != 0 {
-            trunc = true
-        }
-    }
-    if roundUp {
-        m++
-    }
-    if m >= max {
-        // Happens if di was originally 99999....xx
-        m /= 10
-        trimmed++
-    }
-    // render digits (similar to formatBits)
-    n := uint(prec)
-    d.nd = prec
-    v := m
-    for v >= 100 {
-        var v1, v2 uint64
-        if v>>32 == 0 {
-            v1, v2 = uint64(uint32(v)/100), uint64(uint32(v)%100)
-        } else {
-            v1, v2 = v/100, v%100
-        }
-        n -= 2
-        d.d[n+1] = smallsString[2*v2+1]
-        d.d[n+0] = smallsString[2*v2+0]
-        v = v1
-    }
-    if v > 0 {
-        n--
-        d.d[n] = smallsString[2*v+1]
-    }
-    if v >= 10 {
-        n--
-        d.d[n] = smallsString[2*v]
-    }
-    for d.d[d.nd-1] == '0' {
-        d.nd--
-        trimmed++
-    }
-    d.dp = d.nd + trimmed
-}
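formatDecimal's trimming loop applies decimal round-half-to-even: a trailing 5 with nothing nonzero behind it rounds toward the even neighbor, while any dropped nonzero digit forces rounding away from zero. A minimal standalone illustration of the same rule (roundToDigits is a hypothetical helper written for this sketch, not part of the package):

```go
package main

import "fmt"

// roundToDigits trims m to at most prec decimal digits using the same
// round-half-to-even convention as formatDecimal above.
func roundToDigits(m uint64, prec int) uint64 {
	max := uint64(1)
	for i := 0; i < prec; i++ {
		max *= 10
	}
	roundUp, trunc := false, false
	for m >= max {
		b := m % 10
		m /= 10
		switch {
		case b > 5:
			roundUp = true
		case b < 5:
			roundUp = false
		default: // b == 5: exact half, round to even unless digits were already dropped
			roundUp = trunc || m&1 == 1
		}
		if b != 0 {
			trunc = true
		}
	}
	if roundUp {
		m++
	}
	return m
}

func main() {
	fmt.Println(roundToDigits(12345, 4)) // 1234: the half rounds down to the even neighbor
	fmt.Println(roundToDigits(12355, 4)) // 1236: here the even neighbor is above
}
```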
-
-// ryuFtoaShortest formats mant*2^exp to the shortest decimal
-// representation that round-trips.
-func ryuFtoaShortest(d *decimalSlice, mant uint64, exp int, flt *floatInfo) {
-    if mant == 0 {
-        d.nd, d.dp = 0, 0
-        return
-    }
-    // If input is an exact integer with fewer bits than the mantissa,
-    // the previous and next integer are not admissible representations.
-    if exp <= 0 && bits.TrailingZeros64(mant) >= -exp {
-        mant >>= uint(-exp)
-        ryuDigits(d, mant, mant, mant, true, false)
-        return
-    }
-    ml, mc, mu, e2 := computeBounds(mant, exp, flt)
-    if e2 == 0 {
-        ryuDigits(d, ml, mc, mu, true, false)
-        return
-    }
-    // Find 10^q *larger* than 2^-e2
-    q := mulByLog2Log10(-e2) + 1
-
-    // We are going to multiply by 10^q using 128-bit arithmetic.
-    // The exponent is the same for all 3 numbers.
-    var dl, dc, du uint64
-    var dl0, dc0, du0 bool
-    if flt == &float32info {
-        var dl32, dc32, du32 uint32
-        dl32, _, dl0 = mult64bitPow10(uint32(ml), e2, q)
-        dc32, _, dc0 = mult64bitPow10(uint32(mc), e2, q)
-        du32, e2, du0 = mult64bitPow10(uint32(mu), e2, q)
-        dl, dc, du = uint64(dl32), uint64(dc32), uint64(du32)
-    } else {
-        dl, _, dl0 = mult128bitPow10(ml, e2, q)
-        dc, _, dc0 = mult128bitPow10(mc, e2, q)
-        du, e2, du0 = mult128bitPow10(mu, e2, q)
-    }
-    if e2 >= 0 {
-        panic("not enough significant bits after mult128bitPow10")
-    }
-    // Is it an exact computation?
-    if q > 55 {
-        // Large positive powers of ten are not exact
-        dl0, dc0, du0 = false, false, false
-    }
-    if q < 0 && q >= -24 {
-        // Division by a power of ten may be exact.
-        // (note that 5^25 is a 59-bit number so division by 5^25 is never exact).
-        if divisibleByPower5(ml, -q) {
-            dl0 = true
-        }
-        if divisibleByPower5(mc, -q) {
-            dc0 = true
-        }
-        if divisibleByPower5(mu, -q) {
-            du0 = true
-        }
-    }
-    // Express the results (dl, dc, du)*2^e2 as integers.
-    // Extra bits must be removed and rounding hints computed.
-    extra := uint(-e2)
-    extraMask := uint64(1<<extra - 1)
-    dl, fracl := dl>>extra, dl&extraMask
-    dc, fracc := dc>>extra, dc&extraMask
-    du, fracu := du>>extra, du&extraMask
-    // Is it allowed to use 'du' as a result?
-    // It is always allowed when it is truncated, but also
-    // if it is exact and the original binary mantissa is even
-    // When disallowed, we can subtract 1.
-    uok := !du0 || fracu > 0
-    if du0 && fracu == 0 {
-        uok = mant&1 == 0
-    }
-    if !uok {
-        du--
-    }
-    // Is 'dc' the correctly rounded base 10 mantissa?
-    // The correct rounding might be dc+1
-    cup := false // don't round up.
-    if dc0 {
-        // If we computed an exact product, the half integer
-        // should round to next (even) integer if 'dc' is odd.
-        cup = fracc > 1<<(extra-1) ||
-            (fracc == 1<<(extra-1) && dc&1 == 1)
-    } else {
-        // otherwise, the result is a lower truncation of the ideal
-        // result.
-        cup = fracc>>(extra-1) == 1
-    }
-    // Is 'dl' an allowed representation?
-    // Only if it is an exact value, and if the original binary mantissa
-    // was even.
-    lok := dl0 && fracl == 0 && (mant&1 == 0)
-    if !lok {
-        dl++
-    }
-    // We need to remember whether the trimmed digits of 'dc' are zero.
-    c0 := dc0 && fracc == 0
-    // render digits
-    ryuDigits(d, dl, dc, du, c0, cup)
-    d.dp -= q
-}
-
-// mulByLog2Log10 returns math.Floor(x * log(2)/log(10)) for an integer x in
-// the range -1600 <= x && x <= +1600.
-//
-// The range restriction lets us work in faster integer arithmetic instead of
-// slower floating point arithmetic. Correctness is verified by unit tests.
-func mulByLog2Log10(x int) int {
-    // log(2)/log(10) ≈ 0.30102999566 ≈ 78913 / 2^18
-    return (x * 78913) >> 18
-}
-
-// mulByLog10Log2 returns math.Floor(x * log(10)/log(2)) for an integer x in
-// the range -500 <= x && x <= +500.
-//
-// The range restriction lets us work in faster integer arithmetic instead of
-// slower floating point arithmetic. Correctness is verified by unit tests.
-func mulByLog10Log2(x int) int {
-    // log(10)/log(2) ≈ 3.32192809489 ≈ 108853 / 2^15
-    return (x * 108853) >> 15
-}
-
-// computeBounds returns a floating-point vector (l, c, u)×2^e2
-// where the mantissas are 55-bit (or 26-bit) integers, describing the interval
-// represented by the input float64 or float32.
-func computeBounds(mant uint64, exp int, flt *floatInfo) (lower, central, upper uint64, e2 int) {
-    if mant != 1<<flt.mantbits || exp == flt.bias+1-int(flt.mantbits) {
-        // regular case (or denormals)
-        lower, central, upper = 2*mant-1, 2*mant, 2*mant+1
-        e2 = exp - 1
-        return
-    } else {
-        // border of an exponent
-        lower, central, upper = 4*mant-1, 4*mant, 4*mant+2
-        e2 = exp - 2
-        return
-    }
-}
-
-func ryuDigits(d *decimalSlice, lower, central, upper uint64,
-    c0, cup bool) {
-    lhi, llo := divmod1e9(lower)
-    chi, clo := divmod1e9(central)
-    uhi, ulo := divmod1e9(upper)
-    if uhi == 0 {
-        // only low digits (for denormals)
-        ryuDigits32(d, llo, clo, ulo, c0, cup, 8)
-    } else if lhi < uhi {
-        // truncate 9 digits at once.
-        if llo != 0 {
-            lhi++
-        }
-        c0 = c0 && clo == 0
-        cup = (clo > 5e8) || (clo == 5e8 && cup)
-        ryuDigits32(d, lhi, chi, uhi, c0, cup, 8)
-        d.dp += 9
-    } else {
-        d.nd = 0
-        // emit high part
-        n := uint(9)
-        for v := chi; v > 0; {
-            v1, v2 := v/10, v%10
-            v = v1
-            n--
-            d.d[n] = byte(v2 + '0')
-        }
-        d.d = d.d[n:]
-        d.nd = int(9 - n)
-        // emit low part
-        ryuDigits32(d, llo, clo, ulo,
-            c0, cup, d.nd+8)
-    }
-    // trim trailing zeros
-    for d.nd > 0 && d.d[d.nd-1] == '0' {
-        d.nd--
-    }
-    // trim initial zeros
-    for d.nd > 0 && d.d[0] == '0' {
-        d.nd--
-        d.dp--
-        d.d = d.d[1:]
-    }
-}
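mulByLog2Log10 and mulByLog10Log2 above replace floating-point logarithms with fixed-point multiplications (78913/2^18 and 108853/2^15); the deleted ftoaryu_test.go further down verified them exhaustively over their stated ranges. A quick standalone check of the same identity, outside the package:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	ok := true
	for x := -1600; x <= 1600; x++ {
		got := (x * 78913) >> 18 // mulByLog2Log10's fixed-point form
		want := int(math.Floor(float64(x) * math.Ln2 / math.Ln10))
		if got != want {
			ok = false
			fmt.Printf("mismatch at x=%d: %d vs %d\n", x, got, want)
		}
	}
	fmt.Println("fixed-point approximation matches floor(x*log10(2)):", ok)
}
```

Note that Go's `>>` on a negative int rounds toward negative infinity, which is exactly the floor behavior the approximation needs.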
-
-// ryuDigits32 emits decimal digits for a number less than 1e9.
-func ryuDigits32(d *decimalSlice, lower, central, upper uint32,
-    c0, cup bool, endindex int) {
-    if upper == 0 {
-        d.dp = endindex + 1
-        return
-    }
-    trimmed := 0
-    // Remember last trimmed digit to check for round-up.
-    // c0 will be used to remember zeroness of following digits.
-    cNextDigit := 0
-    for upper > 0 {
-        // Repeatedly compute:
-        //     l = Ceil(lower / 10^k)
-        //     c = Round(central / 10^k)
-        //     u = Floor(upper / 10^k)
-        // and stop when c goes out of the (l, u) interval.
-        l := (lower + 9) / 10
-        c, cdigit := central/10, central%10
-        u := upper / 10
-        if l > u {
-            // don't trim the last digit as it is forbidden to go below l;
-            // otherwise, trim and exit now.
-            break
-        }
-        // Check that we didn't cross the lower boundary.
-        // The case where l < u but c == l-1 is essentially impossible,
-        // but may happen if:
-        //     lower   = ..11
-        //     central = ..19
-        //     upper   = ..31
-        // and means that 'central' is very close but less than
-        // an integer ending with many zeros, and usually
-        // the "round-up" logic hides the problem.
-        if l == c+1 && c < u {
-            c++
-            cdigit = 0
-            cup = false
-        }
-        trimmed++
-        // Remember trimmed digits of c
-        c0 = c0 && cNextDigit == 0
-        cNextDigit = int(cdigit)
-        lower, central, upper = l, c, u
-    }
-    // should we round up?
-    if trimmed > 0 {
-        cup = cNextDigit > 5 ||
-            (cNextDigit == 5 && !c0) ||
-            (cNextDigit == 5 && c0 && central&1 == 1)
-    }
-    if central < upper && cup {
-        central++
-    }
-    // We know where the number ends, fill directly
-    endindex -= trimmed
-    v := central
-    n := endindex
-    for n > d.nd {
-        v1, v2 := v/100, v%100
-        d.d[n] = smallsString[2*v2+1]
-        d.d[n-1] = smallsString[2*v2+0]
-        n -= 2
-        v = v1
-    }
-    if n == d.nd {
-        d.d[n] = byte(v + '0')
-    }
-    d.nd = endindex + 1
-    d.dp = d.nd + trimmed
-}
-
-// mult64bitPow10 takes a floating-point input with a 25-bit
-// mantissa and multiplies it with 10^q. The resulting mantissa
-// is m*P >> 57 where P is a 64-bit element of the detailedPowersOfTen tables.
-// It is typically 31 or 32-bit wide.
-// The returned boolean is true if all trimmed bits were zero.
-//
-// That is:
-//
-//     m*2^e2 * round(10^q) = resM * 2^resE + ε
-//     exact = ε == 0
-func mult64bitPow10(m uint32, e2, q int) (resM uint32, resE int, exact bool) {
-    if q == 0 {
-        // P == 1<<63
-        return m << 6, e2 - 6, true
-    }
-    if q < detailedPowersOfTenMinExp10 || detailedPowersOfTenMaxExp10 < q {
-        // This never happens due to the range of float32/float64 exponent
-        panic("mult64bitPow10: power of 10 is out of range")
-    }
-    pow := detailedPowersOfTen[q-detailedPowersOfTenMinExp10][1]
-    if q < 0 {
-        // Inverse powers of ten must be rounded up.
-        pow += 1
-    }
-    hi, lo := bits.Mul64(uint64(m), pow)
-    e2 += mulByLog10Log2(q) - 63 + 57
-    return uint32(hi<<7 | lo>>57), e2, lo<<7 == 0
-}
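mult64bitPow10's core step is a single 64×64→128-bit multiply against the high word of a detailedPowersOfTen entry, keeping the top bits and reporting whether any discarded bit was nonzero. A standalone sketch of that pattern using the table's exact 1e1 entry (0xA000000000000000, i.e. 10·2^60), so the product is exact by construction:

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// P is the 64-bit mantissa of 10^1 from the deleted table: 10 * 2^60.
	const pow = 0xA000000000000000
	m := uint32(1 << 24) // a renormalized 25-bit mantissa

	hi, lo := bits.Mul64(uint64(m), pow)
	resM := uint32(hi<<7 | lo>>57) // keep the top bits, same 57-bit shift as above
	exact := lo<<7 == 0            // true when no nonzero bits were discarded

	// resM = 10 * 2^27: the mantissa of m*10, still left-aligned.
	fmt.Printf("resM = %d (0x%X), exact = %v\n", resM, resM, exact)
}
```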
-
-// mult128bitPow10 takes a floating-point input with a 55-bit
-// mantissa and multiplies it with 10^q. The resulting mantissa
-// is m*P >> 119 where P is a 128-bit element of the detailedPowersOfTen tables.
-// It is typically 63 or 64-bit wide.
-// The returned boolean is true if all trimmed bits were zero.
-//
-// That is:
-//
-//     m*2^e2 * round(10^q) = resM * 2^resE + ε
-//     exact = ε == 0
-func mult128bitPow10(m uint64, e2, q int) (resM uint64, resE int, exact bool) {
-    if q == 0 {
-        // P == 1<<127
-        return m << 8, e2 - 8, true
-    }
-    if q < detailedPowersOfTenMinExp10 || detailedPowersOfTenMaxExp10 < q {
-        // This never happens due to the range of float32/float64 exponent
-        panic("mult128bitPow10: power of 10 is out of range")
-    }
-    pow := detailedPowersOfTen[q-detailedPowersOfTenMinExp10]
-    if q < 0 {
-        // Inverse powers of ten must be rounded up.
-        pow[0] += 1
-    }
-    e2 += mulByLog10Log2(q) - 127 + 119
-
-    // long multiplication
-    l1, l0 := bits.Mul64(m, pow[0])
-    h1, h0 := bits.Mul64(m, pow[1])
-    mid, carry := bits.Add64(l1, h0, 0)
-    h1 += carry
-    return h1<<9 | mid>>55, e2, mid<<9 == 0 && l0 == 0
-}
-
-func divisibleByPower5(m uint64, k int) bool {
-    if m == 0 {
-        return true
-    }
-    for i := 0; i < k; i++ {
-        if m%5 != 0 {
-            return false
-        }
-        m /= 5
-    }
-    return true
-}
-
-// divmod1e9 computes quotient and remainder of division by 1e9,
-// avoiding runtime uint64 division on 32-bit platforms.
-func divmod1e9(x uint64) (uint32, uint32) {
-    if !host32bit {
-        return uint32(x / 1e9), uint32(x % 1e9)
-    }
-    // Use the same sequence of operations as the amd64 compiler.
-    hi, _ := bits.Mul64(x>>1, 0x89705f4136b4a598) // binary digits of 1e-9
-    q := hi >> 28
-    return uint32(q), uint32(x - q*1e9)
-}
diff --git a/src/strconv/ftoaryu_test.go b/src/strconv/ftoaryu_test.go
deleted file mode 100644
index 9758619ad1d..00000000000
--- a/src/strconv/ftoaryu_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package strconv_test
-
-import (
-    "math"
-    . "strconv"
-    "testing"
-)
-
-func TestMulByLog2Log10(t *testing.T) {
-    for x := -1600; x <= +1600; x++ {
-        iMath := MulByLog2Log10(x)
-        fMath := int(math.Floor(float64(x) * math.Ln2 / math.Ln10))
-        if iMath != fMath {
-            t.Errorf("mulByLog2Log10(%d) failed: %d vs %d\n", x, iMath, fMath)
-        }
-    }
-}
-
-func TestMulByLog10Log2(t *testing.T) {
-    for x := -500; x <= +500; x++ {
-        iMath := MulByLog10Log2(x)
-        fMath := int(math.Floor(float64(x) * math.Ln10 / math.Ln2))
-        if iMath != fMath {
-            t.Errorf("mulByLog10Log2(%d) failed: %d vs %d\n", x, iMath, fMath)
-        }
-    }
-}
diff --git a/src/strconv/import_test.go b/src/strconv/import_test.go
new file mode 100644
index 00000000000..b44678bc7c4
--- /dev/null
+++ b/src/strconv/import_test.go
@@ -0,0 +1,12 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package strconv_test
+
+import . "strconv"
+
+var (
+    baseError    = BaseError
+    bitSizeError = BitSizeError
+)
diff --git a/src/strconv/internal_test.go b/src/strconv/internal_test.go
deleted file mode 100644
index f2cceff20eb..00000000000
--- a/src/strconv/internal_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// export access to strconv internals for tests
-
-package strconv
-
-func NewDecimal(i uint64) *decimal {
-    d := new(decimal)
-    d.Assign(i)
-    return d
-}
-
-func SetOptimize(b bool) bool {
-    old := optimize
-    optimize = b
-    return old
-}
-
-func ParseFloatPrefix(s string, bitSize int) (float64, int, error) {
-    return parseFloatPrefix(s, bitSize)
-}
-
-func MulByLog2Log10(x int) int {
-    return mulByLog2Log10(x)
-}
-
-func MulByLog10Log2(x int) int {
-    return mulByLog10Log2(x)
-}
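number.go below turns package strconv into a thin veneer over internal/strconv: the internal package reports sentinel errors, and toError rewraps them into the *NumError values the public API has always promised. That observable contract is unchanged, as this example against the public API shows:

```go
package main

import (
	"errors"
	"fmt"
	"strconv"
)

func main() {
	_, err := strconv.ParseInt("99999999999999999999", 10, 64)

	// The concrete error type is *NumError, carrying the function name
	// and the offending input; it unwraps to the ErrRange sentinel.
	var ne *strconv.NumError
	if errors.As(err, &ne) {
		fmt.Println(ne.Func, ne.Num)                  // ParseInt 99999999999999999999
		fmt.Println(errors.Is(err, strconv.ErrRange)) // true
	}
}
```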
diff --git a/src/strconv/number.go b/src/strconv/number.go
new file mode 100644
index 00000000000..3fa625c35ff
--- /dev/null
+++ b/src/strconv/number.go
@@ -0,0 +1,286 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package strconv
+
+import (
+    "errors"
+    "internal/strconv"
+    "internal/stringslite"
+)
+
+// IntSize is the size in bits of an int or uint value.
+const IntSize = strconv.IntSize
+
+// ParseBool returns the boolean value represented by the string.
+// It accepts 1, t, T, TRUE, true, True, 0, f, F, FALSE, false, False.
+// Any other value returns an error.
+func ParseBool(str string) (bool, error) {
+    x, err := strconv.ParseBool(str)
+    if err != nil {
+        return x, toError("ParseBool", str, 0, 0, err)
+    }
+    return x, nil
+}
+
+// FormatBool returns "true" or "false" according to the value of b.
+func FormatBool(b bool) string {
+    return strconv.FormatBool(b)
+}
+
+// AppendBool appends "true" or "false", according to the value of b,
+// to dst and returns the extended buffer.
+func AppendBool(dst []byte, b bool) []byte {
+    return strconv.AppendBool(dst, b)
+}
+
+// ParseComplex converts the string s to a complex number
+// with the precision specified by bitSize: 64 for complex64, or 128 for complex128.
+// When bitSize=64, the result still has type complex128, but it will be
+// convertible to complex64 without changing its value.
+//
+// The number represented by s must be of the form N, Ni, or N±Ni, where N stands
+// for a floating-point number as recognized by [ParseFloat], and i is the imaginary
+// component. If the second N is unsigned, a + sign is required between the two components
+// as indicated by the ±. If the second N is NaN, only a + sign is accepted.
+// The form may be parenthesized and cannot contain any spaces.
+// The resulting complex number consists of the two components converted by ParseFloat.
+//
+// The errors that ParseComplex returns have concrete type [*NumError]
+// and include err.Num = s.
+//
+// If s is not syntactically well-formed, ParseComplex returns err.Err = ErrSyntax.
+//
+// If s is syntactically well-formed but either component is more than 1/2 ULP
+// away from the largest floating point number of the given component's size,
+// ParseComplex returns err.Err = ErrRange and c = ±Inf for the respective component.
+func ParseComplex(s string, bitSize int) (complex128, error) {
+    x, err := strconv.ParseComplex(s, bitSize)
+    if err != nil {
+        return x, toError("ParseComplex", s, 0, bitSize, err)
+    }
+    return x, nil
+}
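A quick illustration of the N, Ni, N±Ni, and parenthesized forms that the documentation above accepts (strconv.ParseComplex is the existing public API):

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	for _, s := range []string{"2.5", "3i", "1+2i", "(1e2-0.5i)"} {
		c, err := strconv.ParseComplex(s, 128)
		fmt.Printf("%-10s -> %v, err=%v\n", s, c, err)
	}
}
```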
+
+// ParseFloat converts the string s to a floating-point number
+// with the precision specified by bitSize: 32 for float32, or 64 for float64.
+// When bitSize=32, the result still has type float64, but it will be
+// convertible to float32 without changing its value.
+//
+// ParseFloat accepts decimal and hexadecimal floating-point numbers
+// as defined by the Go syntax for [floating-point literals].
+// If s is well-formed and near a valid floating-point number,
+// ParseFloat returns the nearest floating-point number rounded
+// using IEEE754 unbiased rounding.
+// (Parsing a hexadecimal floating-point value only rounds when
+// there are more bits in the hexadecimal representation than
+// will fit in the mantissa.)
+//
+// The errors that ParseFloat returns have concrete type *NumError
+// and include err.Num = s.
+//
+// If s is not syntactically well-formed, ParseFloat returns err.Err = ErrSyntax.
+//
+// If s is syntactically well-formed but is more than 1/2 ULP
+// away from the largest floating point number of the given size,
+// ParseFloat returns f = ±Inf, err.Err = ErrRange.
+//
+// ParseFloat recognizes the string "NaN", and the (possibly signed) strings "Inf" and "Infinity"
+// as their respective special floating point values. It ignores case when matching.
+//
+// [floating-point literals]: https://go.dev/ref/spec#Floating-point_literals
+func ParseFloat(s string, bitSize int) (float64, error) {
+    x, err := strconv.ParseFloat(s, bitSize)
+    if err != nil {
+        return x, toError("ParseFloat", s, 0, bitSize, err)
+    }
+    return x, nil
+}
+
+// ParseUint is like [ParseInt] but for unsigned numbers.
+//
+// A sign prefix is not permitted.
+func ParseUint(s string, base int, bitSize int) (uint64, error) {
+    x, err := strconv.ParseUint(s, base, bitSize)
+    if err != nil {
+        return x, toError("ParseUint", s, base, bitSize, err)
+    }
+    return x, nil
+}
+
+// ParseInt interprets a string s in the given base (0, 2 to 36) and
+// bit size (0 to 64) and returns the corresponding value i.
+//
+// The string may begin with a leading sign: "+" or "-".
+//
+// If the base argument is 0, the true base is implied by the string's
+// prefix following the sign (if present): 2 for "0b", 8 for "0" or "0o",
+// 16 for "0x", and 10 otherwise. Also, for argument base 0 only,
+// underscore characters are permitted as defined by the Go syntax for
+// [integer literals].
+//
+// The bitSize argument specifies the integer type
+// that the result must fit into. Bit sizes 0, 8, 16, 32, and 64
+// correspond to int, int8, int16, int32, and int64.
+// If bitSize is below 0 or above 64, an error is returned.
+//
+// The errors that ParseInt returns have concrete type [*NumError]
+// and include err.Num = s. If s is empty or contains invalid
+// digits, err.Err = [ErrSyntax] and the returned value is 0;
+// if the value corresponding to s cannot be represented by a
+// signed integer of the given size, err.Err = [ErrRange] and the
+// returned value is the maximum magnitude integer of the
+// appropriate bitSize and sign.
+//
+// [integer literals]: https://go.dev/ref/spec#Integer_literals
+func ParseInt(s string, base int, bitSize int) (i int64, err error) {
+    x, err := strconv.ParseInt(s, base, bitSize)
+    if err != nil {
+        return x, toError("ParseInt", s, base, bitSize, err)
+    }
+    return x, nil
+}
+
+// Atoi is equivalent to ParseInt(s, 10, 0), converted to type int.
+func Atoi(s string) (int, error) {
+    x, err := strconv.Atoi(s)
+    if err != nil {
+        return x, toError("Atoi", s, 0, 0, err)
+    }
+    return x, nil
+}
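The base-0 prefix rules and the two error kinds documented above, in miniature (again using the stable public API):

```go
package main

import (
	"errors"
	"fmt"
	"strconv"
)

func main() {
	// Base 0 infers the base from the prefix and permits underscores.
	n, _ := strconv.ParseInt("0x1_F", 0, 64)
	fmt.Println(n) // 31

	_, err := strconv.ParseInt("12z", 10, 64)
	fmt.Println(errors.Is(err, strconv.ErrSyntax)) // true

	_, err = strconv.ParseInt("300", 10, 8) // max int8 is 127
	fmt.Println(errors.Is(err, strconv.ErrRange)) // true
}
```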
+// +// The format fmt is one of +// - 'b' (-ddddp±ddd, a binary exponent), +// - 'e' (-d.dddde±dd, a decimal exponent), +// - 'E' (-d.ddddE±dd, a decimal exponent), +// - 'f' (-ddd.dddd, no exponent), +// - 'g' ('e' for large exponents, 'f' otherwise), +// - 'G' ('E' for large exponents, 'f' otherwise), +// - 'x' (-0xd.ddddp±ddd, a hexadecimal fraction and binary exponent), or +// - 'X' (-0Xd.ddddP±ddd, a hexadecimal fraction and binary exponent). +// +// The precision prec controls the number of digits (excluding the exponent) +// printed by the 'e', 'E', 'f', 'g', 'G', 'x', and 'X' formats. +// For 'e', 'E', 'f', 'x', and 'X', it is the number of digits after the decimal point. +// For 'g' and 'G' it is the maximum number of significant digits (trailing +// zeros are removed). +// The special precision -1 uses the smallest number of digits +// necessary such that ParseFloat will return f exactly. +// The exponent is written as a decimal integer; +// for all formats other than 'b', it will be at least two digits. +func FormatFloat(f float64, fmt byte, prec, bitSize int) string { + return strconv.FormatFloat(f, fmt, prec, bitSize) +} + +// AppendFloat appends the string form of the floating-point number f, +// as generated by [FormatFloat], to dst and returns the extended buffer. +func AppendFloat(dst []byte, f float64, fmt byte, prec, bitSize int) []byte { + return strconv.AppendFloat(dst, f, fmt, prec, bitSize) +} + +// FormatUint returns the string representation of i in the given base, +// for 2 <= base <= 36. The result uses the lower-case letters 'a' to 'z' +// for digit values >= 10. +func FormatUint(i uint64, base int) string { + return strconv.FormatUint(i, base) +} + +// FormatInt returns the string representation of i in the given base, +// for 2 <= base <= 36. The result uses the lower-case letters 'a' to 'z' +// for digit values >= 10. +func FormatInt(i int64, base int) string { + return strconv.FormatInt(i, base) +} + +// Itoa is equivalent to [FormatInt](int64(i), 10). +func Itoa(i int) string { + return strconv.Itoa(i) +} + +// AppendInt appends the string form of the integer i, +// as generated by [FormatInt], to dst and returns the extended buffer. +func AppendInt(dst []byte, i int64, base int) []byte { + return strconv.AppendInt(dst, i, base) +} + +// AppendUint appends the string form of the unsigned integer i, +// as generated by [FormatUint], to dst and returns the extended buffer. +func AppendUint(dst []byte, i uint64, base int) []byte { + return strconv.AppendUint(dst, i, base) +} + +// toError converts an internal/strconv error to the corresponding error +// guaranteed by this package's API. +func toError(fn, s string, base, bitSize int, err error) error { + switch err { + case strconv.ErrSyntax: + return syntaxError(fn, s) + case strconv.ErrRange: + return rangeError(fn, s) + case strconv.ErrBase: + return baseError(fn, s, base) + case strconv.ErrBitSize: + return bitSizeError(fn, s, bitSize) + } + return err +} + +// ErrRange indicates that a value is out of range for the target type. +var ErrRange = errors.New("value out of range") + +// ErrSyntax indicates that a value does not have the right syntax for the target type. +var ErrSyntax = errors.New("invalid syntax") + +// A NumError records a failed conversion. +type NumError struct { + Func string // the failing function (ParseBool, ParseInt, ParseUint, ParseFloat, ParseComplex) + Num string // the input + Err error // the reason the conversion failed (e.g. ErrRange, ErrSyntax, etc.) 
+} + +func (e *NumError) Error() string { + return "strconv." + e.Func + ": " + "parsing " + Quote(e.Num) + ": " + e.Err.Error() +} + +func (e *NumError) Unwrap() error { return e.Err } + +// All ParseXXX functions allow the input string to escape to the error value. +// This hurts strconv.ParseXXX(string(b)) calls where b is []byte since +// the conversion from []byte must allocate a string on the heap. +// If we assume errors are infrequent, then we can avoid escaping the input +// back to the output by copying it first. This allows the compiler to call +// strconv.ParseXXX without a heap allocation for most []byte to string +// conversions, since it can now prove that the string cannot escape Parse. + +func syntaxError(fn, str string) *NumError { + return &NumError{fn, stringslite.Clone(str), ErrSyntax} +} + +func rangeError(fn, str string) *NumError { + return &NumError{fn, stringslite.Clone(str), ErrRange} +} + +func baseError(fn, str string, base int) *NumError { + return &NumError{fn, stringslite.Clone(str), errors.New("invalid base " + Itoa(base))} +} + +func bitSizeError(fn, str string, bitSize int) *NumError { + return &NumError{fn, stringslite.Clone(str), errors.New("invalid bit size " + Itoa(bitSize))} +} diff --git a/src/strconv/number_test.go b/src/strconv/number_test.go new file mode 100644 index 00000000000..4408bc05564 --- /dev/null +++ b/src/strconv/number_test.go @@ -0,0 +1,956 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Note: These tests are focused mainly on generating the right errors. +// The extensive numerical tests are in ../internal/strconv. +// Add new tests there instead of here whenever possible. + +package strconv_test + +import ( + "bytes" + "errors" + "math" + "math/cmplx" + "reflect" + . "strconv" + "testing" +) + +type atobTest struct { + in string + out bool + err error +} + +var atobtests = []atobTest{ + {"", false, ErrSyntax}, + {"asdf", false, ErrSyntax}, + {"0", false, nil}, + {"false", false, nil}, + {"true", true, nil}, +} + +func TestParseBool(t *testing.T) { + for _, test := range atobtests { + b, e := ParseBool(test.in) + if test.err != nil { + // expect an error + if e == nil { + t.Errorf("ParseBool(%s) = nil; want %s", test.in, test.err) + } else { + // NumError assertion must succeed; it's the only thing we return. 
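+ // Callers would normally match with errors.Is(err, ErrSyntax), which works + // because NumError.Unwrap returns Err (see TestNumErrorUnwrap below); the + // assertion here also pins down the concrete *NumError type.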
+ if e.(*NumError).Err != test.err { + t.Errorf("ParseBool(%s) = %s; want %s", test.in, e, test.err) + } + } + } else { + if e != nil { + t.Errorf("ParseBool(%s) = %s; want nil", test.in, e) + } + if b != test.out { + t.Errorf("ParseBool(%s) = %t; want %t", test.in, b, test.out) + } + } + } +} + +var boolString = map[bool]string{ + true: "true", + false: "false", +} + +func TestFormatBool(t *testing.T) { + for b, s := range boolString { + if f := FormatBool(b); f != s { + t.Errorf("FormatBool(%v) = %q; want %q", b, f, s) + } + } +} + +type appendBoolTest struct { + b bool + in []byte + out []byte +} + +var appendBoolTests = []appendBoolTest{ + {true, []byte("foo "), []byte("foo true")}, + {false, []byte("foo "), []byte("foo false")}, +} + +func TestAppendBool(t *testing.T) { + for _, test := range appendBoolTests { + b := AppendBool(test.in, test.b) + if !bytes.Equal(b, test.out) { + t.Errorf("AppendBool(%q, %v) = %q; want %q", test.in, test.b, b, test.out) + } + } +} + +var ( + infp0 = complex(math.Inf(+1), 0) + infm0 = complex(math.Inf(-1), 0) + inf0p = complex(0, math.Inf(+1)) + inf0m = complex(0, math.Inf(-1)) + + infpp = complex(math.Inf(+1), math.Inf(+1)) + infpm = complex(math.Inf(+1), math.Inf(-1)) + infmp = complex(math.Inf(-1), math.Inf(+1)) + infmm = complex(math.Inf(-1), math.Inf(-1)) +) + +type atocTest struct { + in string + out complex128 + err error +} + +func TestParseComplex(t *testing.T) { + tests := []atocTest{ + // Clearly invalid + {"", 0, ErrSyntax}, + {" ", 0, ErrSyntax}, + {"(", 0, ErrSyntax}, + {")", 0, ErrSyntax}, + {"i", 0, ErrSyntax}, + {"+i", 0, ErrSyntax}, + {"-i", 0, ErrSyntax}, + {"1I", 0, ErrSyntax}, + {"10 + 5i", 0, ErrSyntax}, + {"3+", 0, ErrSyntax}, + {"3+5", 0, ErrSyntax}, + {"3+5+5i", 0, ErrSyntax}, + + // Parentheses + {"()", 0, ErrSyntax}, + {"(i)", 0, ErrSyntax}, + {"(0)", 0, nil}, + {"(1i)", 1i, nil}, + {"(3.0+5.5i)", 3.0 + 5.5i, nil}, + {"(1)+1i", 0, ErrSyntax}, + {"(3.0+5.5i", 0, ErrSyntax}, + {"3.0+5.5i)", 0, ErrSyntax}, + + // NaNs + {"NaN", complex(math.NaN(), 0), nil}, + {"NANi", complex(0, math.NaN()), nil}, + {"nan+nAni", complex(math.NaN(), math.NaN()), nil}, + {"+NaN", 0, ErrSyntax}, + {"-NaN", 0, ErrSyntax}, + {"NaN-NaNi", 0, ErrSyntax}, + + // Infs + {"Inf", infp0, nil}, + {"+inf", infp0, nil}, + {"-inf", infm0, nil}, + {"Infinity", infp0, nil}, + {"+INFINITY", infp0, nil}, + {"-infinity", infm0, nil}, + {"+infi", inf0p, nil}, + {"0-infinityi", inf0m, nil}, + {"Inf+Infi", infpp, nil}, + {"+Inf-Infi", infpm, nil}, + {"-Infinity+Infi", infmp, nil}, + {"inf-inf", 0, ErrSyntax}, + + // Zeros + {"0", 0, nil}, + {"0i", 0, nil}, + {"-0.0i", 0, nil}, + {"0+0.0i", 0, nil}, + {"0e+0i", 0, nil}, + {"0e-0+0i", 0, nil}, + {"-0.0-0.0i", 0, nil}, + {"0e+012345", 0, nil}, + {"0x0p+012345i", 0, nil}, + {"0x0.00p-012345i", 0, nil}, + {"+0e-0+0e-0i", 0, nil}, + {"0e+0+0e+0i", 0, nil}, + {"-0e+0-0e+0i", 0, nil}, + + // Regular non-zeroes + {"0.1", 0.1, nil}, + {"0.1i", 0 + 0.1i, nil}, + {"0.123", 0.123, nil}, + {"0.123i", 0 + 0.123i, nil}, + {"0.123+0.123i", 0.123 + 0.123i, nil}, + {"99", 99, nil}, + {"+99", 99, nil}, + {"-99", -99, nil}, + {"+1i", 1i, nil}, + {"-1i", -1i, nil}, + {"+3+1i", 3 + 1i, nil}, + {"30+3i", 30 + 3i, nil}, + {"+3e+3-3e+3i", 3e+3 - 3e+3i, nil}, + {"+3e+3+3e+3i", 3e+3 + 3e+3i, nil}, + {"+3e+3+3e+3i+", 0, ErrSyntax}, + + // Separators + {"0.1", 0.1, nil}, + {"0.1i", 0 + 0.1i, nil}, + {"0.1_2_3", 0.123, nil}, + {"+0x_3p3i", 0x3p3i, nil}, + {"0_0+0x_0p0i", 0, nil}, + {"0x_10.3p-8+0x3p3i", 0x10.3p-8 + 0x3p3i, nil}, + 
{"+0x_1_0.3p-8+0x_3_0p3i", 0x10.3p-8 + 0x30p3i, nil}, + {"0x1_0.3p+8-0x_3p3i", 0x10.3p+8 - 0x3p3i, nil}, + + // Hexadecimals + {"0x10.3p-8+0x3p3i", 0x10.3p-8 + 0x3p3i, nil}, + {"+0x10.3p-8+0x3p3i", 0x10.3p-8 + 0x3p3i, nil}, + {"0x10.3p+8-0x3p3i", 0x10.3p+8 - 0x3p3i, nil}, + {"0x1p0", 1, nil}, + {"0x1p1", 2, nil}, + {"0x1p-1", 0.5, nil}, + {"0x1ep-1", 15, nil}, + {"-0x1ep-1", -15, nil}, + {"-0x2p3", -16, nil}, + {"0x1e2", 0, ErrSyntax}, + {"1p2", 0, ErrSyntax}, + {"0x1e2i", 0, ErrSyntax}, + + // ErrRange + // next float64 - too large + {"+0x1p1024", infp0, ErrRange}, + {"-0x1p1024", infm0, ErrRange}, + {"+0x1p1024i", inf0p, ErrRange}, + {"-0x1p1024i", inf0m, ErrRange}, + {"+0x1p1024+0x1p1024i", infpp, ErrRange}, + {"+0x1p1024-0x1p1024i", infpm, ErrRange}, + {"-0x1p1024+0x1p1024i", infmp, ErrRange}, + {"-0x1p1024-0x1p1024i", infmm, ErrRange}, + // the border is ...158079 + // borderline - okay + {"+0x1.fffffffffffff7fffp1023+0x1.fffffffffffff7fffp1023i", 1.7976931348623157e+308 + 1.7976931348623157e+308i, nil}, + {"+0x1.fffffffffffff7fffp1023-0x1.fffffffffffff7fffp1023i", 1.7976931348623157e+308 - 1.7976931348623157e+308i, nil}, + {"-0x1.fffffffffffff7fffp1023+0x1.fffffffffffff7fffp1023i", -1.7976931348623157e+308 + 1.7976931348623157e+308i, nil}, + {"-0x1.fffffffffffff7fffp1023-0x1.fffffffffffff7fffp1023i", -1.7976931348623157e+308 - 1.7976931348623157e+308i, nil}, + // borderline - too large + {"+0x1.fffffffffffff8p1023", infp0, ErrRange}, + {"-0x1fffffffffffff.8p+971", infm0, ErrRange}, + {"+0x1.fffffffffffff8p1023i", inf0p, ErrRange}, + {"-0x1fffffffffffff.8p+971i", inf0m, ErrRange}, + {"+0x1.fffffffffffff8p1023+0x1.fffffffffffff8p1023i", infpp, ErrRange}, + {"+0x1.fffffffffffff8p1023-0x1.fffffffffffff8p1023i", infpm, ErrRange}, + {"-0x1fffffffffffff.8p+971+0x1fffffffffffff.8p+971i", infmp, ErrRange}, + {"-0x1fffffffffffff8p+967-0x1fffffffffffff8p+967i", infmm, ErrRange}, + // a little too large + {"1e308+1e308i", 1e+308 + 1e+308i, nil}, + {"2e308+2e308i", infpp, ErrRange}, + {"1e309+1e309i", infpp, ErrRange}, + {"0x1p1025+0x1p1025i", infpp, ErrRange}, + {"2e308", infp0, ErrRange}, + {"1e309", infp0, ErrRange}, + {"0x1p1025", infp0, ErrRange}, + {"2e308i", inf0p, ErrRange}, + {"1e309i", inf0p, ErrRange}, + {"0x1p1025i", inf0p, ErrRange}, + // way too large + {"+1e310+1e310i", infpp, ErrRange}, + {"+1e310-1e310i", infpm, ErrRange}, + {"-1e310+1e310i", infmp, ErrRange}, + {"-1e310-1e310i", infmm, ErrRange}, + // under/overflow exponent + {"1e-4294967296", 0, nil}, + {"1e-4294967296i", 0, nil}, + {"1e-4294967296+1i", 1i, nil}, + {"1+1e-4294967296i", 1, nil}, + {"1e-4294967296+1e-4294967296i", 0, nil}, + {"1e+4294967296", infp0, ErrRange}, + {"1e+4294967296i", inf0p, ErrRange}, + {"1e+4294967296+1e+4294967296i", infpp, ErrRange}, + {"1e+4294967296-1e+4294967296i", infpm, ErrRange}, + } + for i := range tests { + test := &tests[i] + if test.err != nil { + test.err = &NumError{Func: "ParseComplex", Num: test.in, Err: test.err} + } + got, err := ParseComplex(test.in, 128) + if !reflect.DeepEqual(err, test.err) { + t.Fatalf("ParseComplex(%q, 128) = %v, %v; want %v, %v", test.in, got, err, test.out, test.err) + } + if !(cmplx.IsNaN(test.out) && cmplx.IsNaN(got)) && got != test.out { + t.Fatalf("ParseComplex(%q, 128) = %v, %v; want %v, %v", test.in, got, err, test.out, test.err) + } + + if complex128(complex64(test.out)) == test.out { + got, err := ParseComplex(test.in, 64) + if !reflect.DeepEqual(err, test.err) { + t.Fatalf("ParseComplex(%q, 64) = %v, %v; want %v, %v", test.in, got, err, 
test.out, test.err) + } + got64 := complex64(got) + if complex128(got64) != test.out { + t.Fatalf("ParseComplex(%q, 64) = %v, %v; want %v, %v", test.in, got, err, test.out, test.err) + } + } + } +} + +// Issue 42297: allow ParseComplex(s, not_32_or_64) for legacy reasons +func TestParseComplexIncorrectBitSize(t *testing.T) { + const s = "1.5e308+1.0e307i" + const want = 1.5e308 + 1.0e307i + + for _, bitSize := range []int{0, 10, 100, 256} { + c, err := ParseComplex(s, bitSize) + if err != nil { + t.Fatalf("ParseComplex(%q, %d) gave error %s", s, bitSize, err) + } + if c != want { + t.Fatalf("ParseComplex(%q, %d) = %g (expected %g)", s, bitSize, c, want) + } + } +} + +type atofTest struct { + in string + out string + err error +} + +var atoftests = []atofTest{ + {"", "0", ErrSyntax}, + {"1.25", "1.25", nil}, + {"+1", "1", nil}, + {"1x", "0", ErrSyntax}, + {"1.1.", "0", ErrSyntax}, + {"1e23", "1e+23", nil}, + {"1E23", "1e+23", nil}, + {"0x1fFe2.p0", "131042", nil}, + {"0x1fFe2.P0", "131042", nil}, + {"-0x2p3", "-16", nil}, + {"0x0.fp4", "15", nil}, + {"0x0.fp0", "0.9375", nil}, + {"0x1e2", "0", ErrSyntax}, + {"1p2", "0", ErrSyntax}, + {"0x1p1024", "+Inf", ErrRange}, + {"-0x1p1024", "-Inf", ErrRange}, + {"0x1.fffffffffffff7fffp1023", "1.7976931348623157e+308", nil}, + {"-0x1.fffffffffffff7fffp1023", "-1.7976931348623157e+308", nil}, + {"1.797693134862315808e308", "+Inf", ErrRange}, + {"-1.797693134862315808e308", "-Inf", ErrRange}, +} + +func init() { + // The atof routines return NumErrors wrapping + // the error and the string. Convert the table above. + for i := range atoftests { + test := &atoftests[i] + if test.err != nil { + test.err = &NumError{"ParseFloat", test.in, test.err} + } + } +} + +func TestAtof(t *testing.T) { + for i := 0; i < len(atoftests); i++ { + test := &atoftests[i] + out, err := ParseFloat(test.in, 64) + outs := FormatFloat(out, 'g', -1, 64) + if outs != test.out || !reflect.DeepEqual(err, test.err) { + t.Errorf("ParseFloat(%v, 64) = %v, %v want %v, %v", + test.in, out, err, test.out, test.err) + } + + if float64(float32(out)) == out { + out, err := ParseFloat(test.in, 32) + out32 := float32(out) + if float64(out32) != out { + t.Errorf("ParseFloat(%v, 32) = %v, not a float32 (closest is %v)", test.in, out, float64(out32)) + continue + } + outs := FormatFloat(float64(out32), 'g', -1, 32) + if outs != test.out || !reflect.DeepEqual(err, test.err) { + t.Errorf("ParseFloat(%v, 32) = %v, %v want %v, %v # %v", + test.in, out32, err, test.out, test.err, out) + } + } + } +} + +type parseUint64Test struct { + in string + out uint64 + err error +} + +var parseUint64Tests = []parseUint64Test{ + {"", 0, ErrSyntax}, + {"0", 0, nil}, + {"1", 1, nil}, + {"12345", 12345, nil}, + {"012345", 12345, nil}, + {"18446744073709551616", 1<<64 - 1, ErrRange}, + {"-1", 0, ErrSyntax}, +} + +type parseUint64BaseTest struct { + in string + base int + out uint64 + err error +} + +var parseUint64BaseTests = []parseUint64BaseTest{ + {"", 0, 0, ErrSyntax}, + {"0", 0, 0, nil}, + {"1", 0, 1, nil}, + {"-1", 0, 0, ErrSyntax}, + {"12345", 0, 12345, nil}, + {"012345", 0, 012345, nil}, + {"18446744073709551616", 0, 1<<64 - 1, ErrRange}, + {"0b", 0, 0, ErrSyntax}, + {"101", 2, 5, nil}, + {"101_", 2, 0, ErrSyntax}, +} + +type parseInt64Test struct { + in string + out int64 + err error +} + +var parseInt64Tests = []parseInt64Test{ + {"", 0, ErrSyntax}, + {"0", 0, nil}, + {"1", 1, nil}, + {"-1", -1, nil}, + {"12345", 12345, nil}, + {"9223372036854775808", 1<<63 - 1, ErrRange}, + {"123%45", 0, ErrSyntax}, +} + 
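+// On ErrRange, ParseInt returns the maximum magnitude value for the requested +// bit size and sign, which is why the overflow entry above expects 1<<63 - 1 +// rather than 0. +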
+type parseInt64BaseTest struct { + in string + base int + out int64 + err error +} + +var parseInt64BaseTests = []parseInt64BaseTest{ + {"", 0, 0, ErrSyntax}, + {"0", 0, 0, nil}, + {"1", 0, 1, nil}, + {"-1", 0, -1, nil}, + {"12345", 0, 12345, nil}, + {"12345", 9, 8303, nil}, + {"012345", 0, 012345, nil}, + {"9223372036854775808", 10, 1<<63 - 1, ErrRange}, + {"0b", 0, 0, ErrSyntax}, + {"101", 2, 5, nil}, + {"101_", 2, 0, ErrSyntax}, +} + +type parseUint32Test struct { + in string + out uint32 + err error +} + +var parseUint32Tests = []parseUint32Test{ + {"", 0, ErrSyntax}, + {"0", 0, nil}, + {"1", 1, nil}, + {"12345", 12345, nil}, + {"12345x", 0, ErrSyntax}, + {"987654321", 987654321, nil}, + {"4294967296", 1<<32 - 1, ErrRange}, + {"1_2_3_4_5", 0, ErrSyntax}, // base=10 so no underscores allowed + {"12345_", 0, ErrSyntax}, +} + +type parseInt32Test struct { + in string + out int32 + err error +} + +var parseInt32Tests = []parseInt32Test{ + {"", 0, ErrSyntax}, + {"0", 0, nil}, + {"-0", 0, nil}, + {"1", 1, nil}, + {"-1", -1, nil}, + {"12345", 12345, nil}, + {"-12345", -12345, nil}, + {"2147483648", 1<<31 - 1, ErrRange}, + {"12345_", 0, ErrSyntax}, +} + +type numErrorTest struct { + num, want string +} + +var numErrorTests = []numErrorTest{ + {"0", `strconv.ParseFloat: parsing "0": failed`}, + {"`", "strconv.ParseFloat: parsing \"`\": failed"}, + {"1\x00.2", `strconv.ParseFloat: parsing "1\x00.2": failed`}, +} + +func init() { + // The parse routines return NumErrors wrapping + // the error and the string. Convert the tables above. + for i := range parseUint64Tests { + test := &parseUint64Tests[i] + if test.err != nil { + test.err = &NumError{"ParseUint", test.in, test.err} + } + } + for i := range parseUint64BaseTests { + test := &parseUint64BaseTests[i] + if test.err != nil { + test.err = &NumError{"ParseUint", test.in, test.err} + } + } + for i := range parseInt64Tests { + test := &parseInt64Tests[i] + if test.err != nil { + test.err = &NumError{"ParseInt", test.in, test.err} + } + } + for i := range parseInt64BaseTests { + test := &parseInt64BaseTests[i] + if test.err != nil { + test.err = &NumError{"ParseInt", test.in, test.err} + } + } + for i := range parseUint32Tests { + test := &parseUint32Tests[i] + if test.err != nil { + test.err = &NumError{"ParseUint", test.in, test.err} + } + } + for i := range parseInt32Tests { + test := &parseInt32Tests[i] + if test.err != nil { + test.err = &NumError{"ParseInt", test.in, test.err} + } + } +} + +func TestParseUint32(t *testing.T) { + for i := range parseUint32Tests { + test := &parseUint32Tests[i] + out, err := ParseUint(test.in, 10, 32) + if uint64(test.out) != out || !reflect.DeepEqual(test.err, err) { + t.Errorf("ParseUint(%q, 10, 32) = %v, %v want %v, %v", + test.in, out, err, test.out, test.err) + } + } +} + +func TestParseUint64(t *testing.T) { + for i := range parseUint64Tests { + test := &parseUint64Tests[i] + out, err := ParseUint(test.in, 10, 64) + if test.out != out || !reflect.DeepEqual(test.err, err) { + t.Errorf("ParseUint(%q, 10, 64) = %v, %v want %v, %v", + test.in, out, err, test.out, test.err) + } + } +} + +func TestParseUint64Base(t *testing.T) { + for i := range parseUint64BaseTests { + test := &parseUint64BaseTests[i] + out, err := ParseUint(test.in, test.base, 64) + if test.out != out || !reflect.DeepEqual(test.err, err) { + t.Errorf("ParseUint(%q, %v, 64) = %v, %v want %v, %v", + test.in, test.base, out, err, test.out, test.err) + } + } +} + +func TestParseInt32(t *testing.T) { + for i := range parseInt32Tests { + test 
:= &parseInt32Tests[i] + out, err := ParseInt(test.in, 10, 32) + if int64(test.out) != out || !reflect.DeepEqual(test.err, err) { + t.Errorf("ParseInt(%q, 10, 32) = %v, %v want %v, %v", + test.in, out, err, test.out, test.err) + } + } +} + +func TestParseInt64(t *testing.T) { + for i := range parseInt64Tests { + test := &parseInt64Tests[i] + out, err := ParseInt(test.in, 10, 64) + if test.out != out || !reflect.DeepEqual(test.err, err) { + t.Errorf("ParseInt(%q, 10, 64) = %v, %v want %v, %v", + test.in, out, err, test.out, test.err) + } + } +} + +func TestParseInt64Base(t *testing.T) { + for i := range parseInt64BaseTests { + test := &parseInt64BaseTests[i] + out, err := ParseInt(test.in, test.base, 64) + if test.out != out || !reflect.DeepEqual(test.err, err) { + t.Errorf("ParseInt(%q, %v, 64) = %v, %v want %v, %v", + test.in, test.base, out, err, test.out, test.err) + } + } +} + +func TestParseUint(t *testing.T) { + switch IntSize { + case 32: + for i := range parseUint32Tests { + test := &parseUint32Tests[i] + out, err := ParseUint(test.in, 10, 0) + if uint64(test.out) != out || !reflect.DeepEqual(test.err, err) { + t.Errorf("ParseUint(%q, 10, 0) = %v, %v want %v, %v", + test.in, out, err, test.out, test.err) + } + } + case 64: + for i := range parseUint64Tests { + test := &parseUint64Tests[i] + out, err := ParseUint(test.in, 10, 0) + if test.out != out || !reflect.DeepEqual(test.err, err) { + t.Errorf("ParseUint(%q, 10, 0) = %v, %v want %v, %v", + test.in, out, err, test.out, test.err) + } + } + } +} + +func TestParseInt(t *testing.T) { + switch IntSize { + case 32: + for i := range parseInt32Tests { + test := &parseInt32Tests[i] + out, err := ParseInt(test.in, 10, 0) + if int64(test.out) != out || !reflect.DeepEqual(test.err, err) { + t.Errorf("ParseInt(%q, 10, 0) = %v, %v want %v, %v", + test.in, out, err, test.out, test.err) + } + } + case 64: + for i := range parseInt64Tests { + test := &parseInt64Tests[i] + out, err := ParseInt(test.in, 10, 0) + if test.out != out || !reflect.DeepEqual(test.err, err) { + t.Errorf("ParseInt(%q, 10, 0) = %v, %v want %v, %v", + test.in, out, err, test.out, test.err) + } + } + } +} + +func TestAtoi(t *testing.T) { + switch IntSize { + case 32: + for i := range parseInt32Tests { + test := &parseInt32Tests[i] + out, err := Atoi(test.in) + var testErr error + if test.err != nil { + testErr = &NumError{"Atoi", test.in, test.err.(*NumError).Err} + } + if int(test.out) != out || !reflect.DeepEqual(testErr, err) { + t.Errorf("Atoi(%q) = %v, %v want %v, %v", + test.in, out, err, test.out, testErr) + } + } + case 64: + for i := range parseInt64Tests { + test := &parseInt64Tests[i] + out, err := Atoi(test.in) + var testErr error + if test.err != nil { + testErr = &NumError{"Atoi", test.in, test.err.(*NumError).Err} + } + if test.out != int64(out) || !reflect.DeepEqual(testErr, err) { + t.Errorf("Atoi(%q) = %v, %v want %v, %v", + test.in, out, err, test.out, testErr) + } + } + } +} + +func bitSizeErrStub(name string, bitSize int) error { + return bitSizeError(name, "0", bitSize) +} + +func baseErrStub(name string, base int) error { + return baseError(name, "0", base) +} + +func noErrStub(name string, arg int) error { + return nil +} + +type parseErrorTest struct { + arg int + errStub func(name string, arg int) error +} + +var parseBitSizeTests = []parseErrorTest{ + {-1, bitSizeErrStub}, + {0, noErrStub}, + {64, noErrStub}, + {65, bitSizeErrStub}, +} + +var parseBaseTests = []parseErrorTest{ + {-1, baseErrStub}, + {0, noErrStub}, + {1, baseErrStub}, + {2, 
noErrStub}, + {36, noErrStub}, + {37, baseErrStub}, +} + +func equalError(a, b error) bool { + if a == nil { + return b == nil + } + if b == nil { + return a == nil + } + return a.Error() == b.Error() +} + +func TestParseIntBitSize(t *testing.T) { + for i := range parseBitSizeTests { + test := &parseBitSizeTests[i] + testErr := test.errStub("ParseInt", test.arg) + _, err := ParseInt("0", 0, test.arg) + if !equalError(testErr, err) { + t.Errorf("ParseInt(\"0\", 0, %v) = 0, %v want 0, %v", + test.arg, err, testErr) + } + } +} + +func TestParseUintBitSize(t *testing.T) { + for i := range parseBitSizeTests { + test := &parseBitSizeTests[i] + testErr := test.errStub("ParseUint", test.arg) + _, err := ParseUint("0", 0, test.arg) + if !equalError(testErr, err) { + t.Errorf("ParseUint(\"0\", 0, %v) = 0, %v want 0, %v", + test.arg, err, testErr) + } + } +} + +func TestParseIntBase(t *testing.T) { + for i := range parseBaseTests { + test := &parseBaseTests[i] + testErr := test.errStub("ParseInt", test.arg) + _, err := ParseInt("0", test.arg, 0) + if !equalError(testErr, err) { + t.Errorf("ParseInt(\"0\", %v, 0) = 0, %v want 0, %v", + test.arg, err, testErr) + } + } +} + +func TestParseUintBase(t *testing.T) { + for i := range parseBaseTests { + test := &parseBaseTests[i] + testErr := test.errStub("ParseUint", test.arg) + _, err := ParseUint("0", test.arg, 0) + if !equalError(testErr, err) { + t.Errorf("ParseUint(\"0\", %v, 0) = 0, %v want 0, %v", + test.arg, err, testErr) + } + } +} + +func TestNumError(t *testing.T) { + for _, test := range numErrorTests { + err := &NumError{ + Func: "ParseFloat", + Num: test.num, + Err: errors.New("failed"), + } + if got := err.Error(); got != test.want { + t.Errorf(`(&NumError{"ParseFloat", %q, "failed"}).Error() = %v, want %v`, test.num, got, test.want) + } + } +} + +func TestNumErrorUnwrap(t *testing.T) { + err := &NumError{Err: ErrSyntax} + if !errors.Is(err, ErrSyntax) { + t.Error("errors.Is failed, wanted success") + } +} + +func TestFormatComplex(t *testing.T) { + tests := []struct { + c complex128 + fmt byte + prec int + bitSize int + out string + }{ + // a variety of signs + {1 + 2i, 'g', -1, 128, "(1+2i)"}, + {3 - 4i, 'g', -1, 128, "(3-4i)"}, + {-5 + 6i, 'g', -1, 128, "(-5+6i)"}, + {-7 - 8i, 'g', -1, 128, "(-7-8i)"}, + + // test that fmt and prec are working + {3.14159 + 0.00123i, 'e', 3, 128, "(3.142e+00+1.230e-03i)"}, + {3.14159 + 0.00123i, 'f', 3, 128, "(3.142+0.001i)"}, + {3.14159 + 0.00123i, 'g', 3, 128, "(3.14+0.00123i)"}, + + // ensure bitSize rounding is working + {1.2345678901234567 + 9.876543210987654i, 'f', -1, 128, "(1.2345678901234567+9.876543210987654i)"}, + {1.2345678901234567 + 9.876543210987654i, 'f', -1, 64, "(1.2345679+9.876543i)"}, + + // other cases are handled by FormatFloat tests + } + for _, test := range tests { + out := FormatComplex(test.c, test.fmt, test.prec, test.bitSize) + if out != test.out { + t.Fatalf("FormatComplex(%v, %q, %d, %d) = %q; want %q", + test.c, test.fmt, test.prec, test.bitSize, out, test.out) + } + } +} + +func TestFormatComplexInvalidBitSize(t *testing.T) { + defer func() { + if r := recover(); r == nil { + t.Fatalf("expected panic due to invalid bitSize") + } + }() + _ = FormatComplex(1+2i, 'g', -1, 100) +} + +type itob64Test struct { + in int64 + base int + out string +} + +var itob64tests = []itob64Test{ + {0, 10, "0"}, + {1, 10, "1"}, + {-1, 10, "-1"}, + {12345678, 10, "12345678"}, + {-1 << 63, 10, "-9223372036854775808"}, + {16, 17, "g"}, + {25, 25, "10"}, + 
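// "holycow" spelled in base 36: digit values h=17, o=24, l=21, y=34, c=12, o=24, w=32, + // accumulated Horner-style in the expression below. + 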
{(((((17*36+24)*36+21)*36+34)*36+12)*36+24)*36 + 32, 36, "holycow"}, +} + +func TestItoa(t *testing.T) { + for _, test := range itob64tests { + s := FormatInt(test.in, test.base) + if s != test.out { + t.Errorf("FormatInt(%v, %v) = %v want %v", + test.in, test.base, s, test.out) + } + x := AppendInt([]byte("abc"), test.in, test.base) + if string(x) != "abc"+test.out { + t.Errorf("AppendInt(%q, %v, %v) = %q want %v", + "abc", test.in, test.base, x, test.out) + } + + if test.in >= 0 { + s := FormatUint(uint64(test.in), test.base) + if s != test.out { + t.Errorf("FormatUint(%v, %v) = %v want %v", + test.in, test.base, s, test.out) + } + x := AppendUint(nil, uint64(test.in), test.base) + if string(x) != test.out { + t.Errorf("AppendUint(%q, %v, %v) = %q want %v", + "abc", uint64(test.in), test.base, x, test.out) + } + } + + if test.base == 10 && int64(int(test.in)) == test.in { + s := Itoa(int(test.in)) + if s != test.out { + t.Errorf("Itoa(%v) = %v want %v", + test.in, s, test.out) + } + } + } + + // Override when base is illegal + defer func() { + if r := recover(); r == nil { + t.Fatalf("expected panic due to illegal base") + } + }() + FormatUint(12345678, 1) +} + +type uitob64Test struct { + in uint64 + base int + out string +} + +var uitob64tests = []uitob64Test{ + {1<<63 - 1, 10, "9223372036854775807"}, + {1 << 63, 10, "9223372036854775808"}, + {1<<63 + 1, 10, "9223372036854775809"}, + {1<<64 - 2, 10, "18446744073709551614"}, + {1<<64 - 1, 10, "18446744073709551615"}, + {1<<64 - 1, 2, "1111111111111111111111111111111111111111111111111111111111111111"}, +} + +func TestUitoa(t *testing.T) { + for _, test := range uitob64tests { + s := FormatUint(test.in, test.base) + if s != test.out { + t.Errorf("FormatUint(%v, %v) = %v want %v", + test.in, test.base, s, test.out) + } + x := AppendUint([]byte("abc"), test.in, test.base) + if string(x) != "abc"+test.out { + t.Errorf("AppendUint(%q, %v, %v) = %q want %v", + "abc", test.in, test.base, x, test.out) + } + + } +} + +var varlenUints = []struct { + in uint64 + out string +}{ + {1, "1"}, + {12, "12"}, + {123, "123"}, + {1234, "1234"}, + {12345, "12345"}, + {123456, "123456"}, + {1234567, "1234567"}, + {12345678, "12345678"}, + {123456789, "123456789"}, + {1234567890, "1234567890"}, + {12345678901, "12345678901"}, + {123456789012, "123456789012"}, + {1234567890123, "1234567890123"}, + {12345678901234, "12345678901234"}, + {123456789012345, "123456789012345"}, + {1234567890123456, "1234567890123456"}, + {12345678901234567, "12345678901234567"}, + {123456789012345678, "123456789012345678"}, + {1234567890123456789, "1234567890123456789"}, + {12345678901234567890, "12345678901234567890"}, +} + +func TestFormatUintVarlen(t *testing.T) { + for _, test := range varlenUints { + s := FormatUint(test.in, 10) + if s != test.out { + t.Errorf("FormatUint(%v, 10) = %v want %v", test.in, s, test.out) + } + } +} diff --git a/src/strings/strings.go b/src/strings/strings.go index 1efd00d5f09..80cea67d626 100644 --- a/src/strings/strings.go +++ b/src/strings/strings.go @@ -1085,8 +1085,9 @@ func trimRightUnicode(s, cutset string) string { return s } -// TrimSpace returns a slice of the string s, with all leading -// and trailing white space removed, as defined by Unicode. +// TrimSpace returns a slice (substring) of the string s, +// with all leading and trailing white space removed, +// as defined by Unicode. func TrimSpace(s string) string { // Fast path for ASCII: look for the first ASCII non-space byte. 
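// Ranging over []byte(s) here does not allocate: the compiler recognizes // the conversion in a range clause and iterates over the string's bytes directly.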
for lo, c := range []byte(s) { diff --git a/src/strings/strings_test.go b/src/strings/strings_test.go index b10b5f05cca..edfeb0e8138 100644 --- a/src/strings/strings_test.go +++ b/src/strings/strings_test.go @@ -694,7 +694,7 @@ func rot13(r rune) rune { func TestMap(t *testing.T) { // Run a couple of awful growth/shrinkage tests a := tenRunes('a') - // 1. Grow. This triggers two reallocations in Map. + // 1. Grow. This triggers two reallocations in Map. maxRune := func(rune) rune { return unicode.MaxRune } m := Map(maxRune, a) expect := tenRunes(unicode.MaxRune) diff --git a/src/sync/atomic/value.go b/src/sync/atomic/value.go index 0cfc5f9496c..031a9d5680e 100644 --- a/src/sync/atomic/value.go +++ b/src/sync/atomic/value.go @@ -98,8 +98,7 @@ func (v *Value) Swap(new any) (old any) { if typ == nil { // Attempt to start first store. // Disable preemption so that other goroutines can use - // active spin wait to wait for completion; and so that - // GC does not see the fake type accidentally. + // active spin wait to wait for completion. runtime_procPin() if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(&firstStoreInProgress)) { runtime_procUnpin() @@ -150,8 +149,7 @@ func (v *Value) CompareAndSwap(old, new any) (swapped bool) { } // Attempt to start first store. // Disable preemption so that other goroutines can use - // active spin wait to wait for completion; and so that - // GC does not see the fake type accidentally. + // active spin wait to wait for completion. runtime_procPin() if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(&firstStoreInProgress)) { runtime_procUnpin() diff --git a/src/sync/atomic/value_test.go b/src/sync/atomic/value_test.go index 721da965e35..b8bc8b8851b 100644 --- a/src/sync/atomic/value_test.go +++ b/src/sync/atomic/value_test.go @@ -185,7 +185,6 @@ func TestValueSwapConcurrent(t *testing.T) { n = 1000 } for i := uint64(0); i < m*n; i += n { - i := i g.Add(1) go func() { var c uint64 @@ -256,7 +255,6 @@ func TestValueCompareAndSwapConcurrent(t *testing.T) { n = 100 } for i := 0; i < m; i++ { - i := i w.Add(1) go func() { for j := i; j < m*n; runtime.Gosched() { diff --git a/src/sync/once.go b/src/sync/once.go index 1573b28b28a..7c9ad8dd1aa 100644 --- a/src/sync/once.go +++ b/src/sync/once.go @@ -52,7 +52,7 @@ type Once struct { func (o *Once) Do(f func()) { // Note: Here is an incorrect implementation of Do: // - // if o.done.CompareAndSwap(0, 1) { + // if o.done.CompareAndSwap(false, true) { // f() // } // diff --git a/src/sync/waitgroup.go b/src/sync/waitgroup.go index 5b035aa3967..29117ab9a9d 100644 --- a/src/sync/waitgroup.go +++ b/src/sync/waitgroup.go @@ -204,13 +204,14 @@ func (wg *WaitGroup) Wait() { } } runtime_SemacquireWaitGroup(&wg.sema, synctestDurable) - if wg.state.Load() != 0 { - panic("sync: WaitGroup is reused before previous Wait has returned") - } + isReset := wg.state.Load() != 0 if race.Enabled { race.Enable() race.Acquire(unsafe.Pointer(wg)) } + if isReset { + panic("sync: WaitGroup is reused before previous Wait has returned") + } return } } @@ -235,7 +236,25 @@ func (wg *WaitGroup) Wait() { func (wg *WaitGroup) Go(f func()) { wg.Add(1) go func() { - defer wg.Done() + defer func() { + if x := recover(); x != nil { + // f panicked, which will be fatal because + // this is a new goroutine. + // + // Calling Done will unblock Wait in the main goroutine, + // allowing it to race with the fatal panic and + // possibly even exit the process (os.Exit(0)) + // before the panic completes. 
+ // + // This is almost certainly undesirable, + // so instead avoid calling Done and simply panic. + panic(x) + } + + // f completed normally, or abruptly using goexit. + // Either way, decrement the semaphore. + wg.Done() + }() f() }() } diff --git a/src/sync/waitgroup_test.go b/src/sync/waitgroup_test.go index 8a948f8972c..6a640ade22e 100644 --- a/src/sync/waitgroup_test.go +++ b/src/sync/waitgroup_test.go @@ -5,6 +5,11 @@ package sync_test import ( + "bytes" + "internal/testenv" + "os" + "os/exec" + "strings" . "sync" "sync/atomic" "testing" @@ -110,6 +115,32 @@ func TestWaitGroupGo(t *testing.T) { } } +// This test ensures that an unhandled panic in a goroutine started by +// WaitGroup.Go terminates the process without causing Wait to unblock; +// previously there was a race. +func TestIssue76126(t *testing.T) { + testenv.MustHaveExec(t) + if os.Getenv("SYNC_TEST_CHILD") != "1" { + // Re-run this test in a child process + // and inspect its failure message. + cmd := exec.Command(os.Args[0], "-test.run=^TestIssue76126$") + cmd.Env = append(os.Environ(), "SYNC_TEST_CHILD=1") + buf := new(bytes.Buffer) + cmd.Stderr = buf + cmd.Run() // ignore error + got := buf.String() + if !strings.Contains(got, "panic: test") { + t.Errorf("missing panic: test\n%s", got) + } + return + } + var wg WaitGroup + wg.Go(func() { + panic("test") + }) + wg.Wait() // process should terminate here + panic("Wait returned") // must not be reached +} + func BenchmarkWaitGroupUncontended(b *testing.B) { type PaddedWaitGroup struct { WaitGroup diff --git a/src/syscall/exec_libc2.go b/src/syscall/exec_libc2.go index a0579627a30..5de09dfe998 100644 --- a/src/syscall/exec_libc2.go +++ b/src/syscall/exec_libc2.go @@ -59,7 +59,6 @@ func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr r1 uintptr nextfd int i int - err error pgrp _C_int cred *Credential ngroups, groups uintptr @@ -99,8 +98,12 @@ func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr // Enable tracing if requested. if sys.Ptrace { - if err = ptrace(PTRACE_TRACEME, 0, 0, 0); err != nil { - err1 = err.(Errno) + if runtime.GOOS == "ios" { + err1 = ENOSYS + goto childerror + } + _, _, err1 = rawSyscall6(abi.FuncPCABI0(libc_ptrace_trampoline), PTRACE_TRACEME, 0, 0, 0, 0, 0) + if err1 != 0 { goto childerror } } diff --git a/src/syscall/exec_linux.go b/src/syscall/exec_linux.go index 14c13e273a4..73582d0aea0 100644 --- a/src/syscall/exec_linux.go +++ b/src/syscall/exec_linux.go @@ -8,7 +8,7 @@ package syscall import ( errpkg "errors" - "internal/itoa" + "internal/strconv" "runtime" "unsafe" ) @@ -681,7 +681,7 @@ childerror: func formatIDMappings(idMap []SysProcIDMap) []byte { var data []byte for _, im := range idMap { - data = append(data, itoa.Itoa(im.ContainerID)+" "+itoa.Itoa(im.HostID)+" "+itoa.Itoa(im.Size)+"\n"...) + data = append(data, strconv.Itoa(im.ContainerID)+" "+strconv.Itoa(im.HostID)+" "+strconv.Itoa(im.Size)+"\n"...) } return data } @@ -710,7 +710,7 @@ func writeIDMappings(path string, idMap []SysProcIDMap) error { // This is needed since kernel 3.19, because you can't write gid_map without // disabling setgroups() system call. func writeSetgroups(pid int, enable bool) error { - sgf := "/proc/" + itoa.Itoa(pid) + "/setgroups" + sgf := "/proc/" + strconv.Itoa(pid) + "/setgroups" fd, err := Open(sgf, O_RDWR, 0) if err != nil { return err } @@ -735,7 +735,7 @@ func writeSetgroups(pid int, enable bool) error { // for a process and it is called from the parent process. 
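// For a child with the given pid, it writes /proc/<pid>/uid_map, then // /proc/<pid>/setgroups (when gid mappings are present), then /proc/<pid>/gid_map.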
func writeUidGidMappings(pid int, sys *SysProcAttr) error { if sys.UidMappings != nil { - uidf := "/proc/" + itoa.Itoa(pid) + "/uid_map" + uidf := "/proc/" + strconv.Itoa(pid) + "/uid_map" if err := writeIDMappings(uidf, sys.UidMappings); err != nil { return err } @@ -746,7 +746,7 @@ func writeUidGidMappings(pid int, sys *SysProcAttr) error { if err := writeSetgroups(pid, sys.GidMappingsEnableSetgroups); err != nil && err != ENOENT { return err } - gidf := "/proc/" + itoa.Itoa(pid) + "/gid_map" + gidf := "/proc/" + strconv.Itoa(pid) + "/gid_map" if err := writeIDMappings(gidf, sys.GidMappings); err != nil { return err } diff --git a/src/syscall/exec_linux_test.go b/src/syscall/exec_linux_test.go index 69d49169446..ac04e51b144 100644 --- a/src/syscall/exec_linux_test.go +++ b/src/syscall/exec_linux_test.go @@ -51,7 +51,6 @@ func whoamiNEWUSER(t *testing.T, uid, gid int, setgroups bool) *exec.Cmd { func TestCloneNEWUSERAndRemap(t *testing.T) { for _, setgroups := range []bool{false, true} { - setgroups := setgroups t.Run(fmt.Sprintf("setgroups=%v", setgroups), func(t *testing.T) { uid := os.Getuid() gid := os.Getgid() diff --git a/src/syscall/exec_plan9.go b/src/syscall/exec_plan9.go index 91705e175ed..f0481e8bf09 100644 --- a/src/syscall/exec_plan9.go +++ b/src/syscall/exec_plan9.go @@ -7,7 +7,7 @@ package syscall import ( - "internal/itoa" + "internal/strconv" "runtime" "sync" "unsafe" ) @@ -327,7 +327,7 @@ func cexecPipe(p []int) error { return e } - fd, e := Open("#d/"+itoa.Itoa(p[1]), O_RDWR|O_CLOEXEC) + fd, e := Open("#d/"+strconv.Itoa(p[1]), O_RDWR|O_CLOEXEC) if e != nil { Close(p[0]) Close(p[1]) diff --git a/src/syscall/mksyscall.pl b/src/syscall/mksyscall.pl index b46a3f9438b..6be94c9c44f 100755 --- a/src/syscall/mksyscall.pl +++ b/src/syscall/mksyscall.pl @@ -141,12 +141,6 @@ while(<>) { # without reading the header. $text .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"; - if ((($darwin || ($openbsd && $libc)) && $func =~ /^ptrace(Ptr)?$/)) { - # The ptrace function is called from forkAndExecInChild where stack - # growth is forbidden. - $text .= "//go:nosplit\n" - } - # Go function header. my $out_decl = @out ? sprintf(" (%s)", join(', ', @out)) : ""; $text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out_decl; diff --git a/src/syscall/syscall_darwin.go b/src/syscall/syscall_darwin.go index 7b4f0d4fb74..ca76cc2962b 100644 --- a/src/syscall/syscall_darwin.go +++ b/src/syscall/syscall_darwin.go @@ -323,10 +323,126 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { return n, nil } +// errno returns e if int32(r) is -1, else it returns 0. +// +//go:nosplit +func errno(r uintptr, e Errno) Errno { + if int32(r) == -1 { + return e + } + return 0 +} + +// errnoX returns e if r is -1, else it returns 0. +// +//go:nosplit +func errnoX(r uintptr, e Errno) Errno { + if r == ^uintptr(0) { + return e + } + return 0 +} + +// errnoPtr returns e if r is 0, else it returns 0. +// +//go:nosplit +func errnoPtr(r uintptr, e Errno) Errno { + if r == 0 { + return e + } + return 0 +} + +//go:cgo_import_dynamic libc_error __error "/usr/lib/libSystem.B.dylib" + +// golang.org/x/sys linknames the following syscalls. +// Do not remove or change the type signature. 
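+// +// Illustrative of the pattern (not part of this change): a consumer obtains +// one of these with a declaration like +// +// //go:linkname syscall_syscall syscall.syscall +// func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno)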
+ +//go:linkname syscall +//go:nosplit +//go:uintptrkeepalive +func syscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) { + r1, r2, err = syscalln(fn, a1, a2, a3) + return r1, r2, errno(r1, err) +} + +//go:linkname syscallX +//go:nosplit +//go:uintptrkeepalive +func syscallX(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) { + r1, r2, err = syscalln(fn, a1, a2, a3) + return r1, r2, errnoX(r1, err) +} + +// syscall.syscall6 is meant for package syscall (and x/sys), +// but widely used packages access it using linkname. +// Notable members of the hall of shame include: +// - github.com/tetratelabs/wazero +// +// See go.dev/issue/67401. +// +//go:linkname syscall6 +//go:nosplit +//go:uintptrkeepalive +func syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) { + r1, r2, err = syscalln(fn, a1, a2, a3, a4, a5, a6) + return r1, r2, errno(r1, err) +} + +//go:linkname syscall6X +//go:nosplit +//go:uintptrkeepalive +func syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) { + r1, r2, err = syscalln(fn, a1, a2, a3, a4, a5, a6) + return r1, r2, errnoX(r1, err) +} + +// syscall9 is used in [internal/syscall/unix]. +// +//go:linkname syscall9 +//go:nosplit +//go:uintptrkeepalive +func syscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) { + r1, r2, err = syscalln(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9) + return r1, r2, errno(r1, err) +} + +//go:linkname rawSyscall +//go:nosplit +//go:uintptrkeepalive +func rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) { + r1, r2, err = rawsyscalln(fn, a1, a2, a3) + return r1, r2, errno(r1, err) +} + +//go:linkname rawSyscall6 +//go:nosplit +//go:uintptrkeepalive +func rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) { + r1, r2, err = rawsyscalln(fn, a1, a2, a3, a4, a5, a6) + return r1, r2, errno(r1, err) +} + +//go:linkname rawSyscall9 +//go:nosplit +//go:uintptrkeepalive +func rawSyscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) { + r1, r2, err = rawsyscalln(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9) + return r1, r2, errno(r1, err) +} + +//go:linkname syscallPtr +//go:nosplit +//go:uintptrkeepalive +func syscallPtr(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) { + r1, r2, e1 := syscalln(fn, a1, a2, a3) + return r1, r2, errnoPtr(r1, e1) +} + // Implemented in the runtime package (runtime/sys_darwin.go) -func syscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) -func syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) -func rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscallPtr(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) + +//go:noescape +func syscalln(fn uintptr, args ...uintptr) (r1, r2 uintptr, err Errno) + +//go:noescape +func rawsyscalln(fn uintptr, args ...uintptr) (r1, r2 uintptr, err Errno) diff --git a/src/syscall/syscall_darwin_amd64.go b/src/syscall/syscall_darwin_amd64.go index 64e54ad7308..d4de4c25a43 100644 --- a/src/syscall/syscall_darwin_amd64.go +++ b/src/syscall/syscall_darwin_amd64.go @@ -61,7 +61,4 @@ func libc_sendfile_trampoline() //go:cgo_import_dynamic libc_sendfile sendfile "/usr/lib/libSystem.B.dylib" -// Implemented in the runtime package (runtime/sys_darwin_64.go) -func syscallX(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) - func Syscall9(trap, a1, a2, 
a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) diff --git a/src/syscall/syscall_darwin_arm64.go b/src/syscall/syscall_darwin_arm64.go index 913c7483746..4a3dce80882 100644 --- a/src/syscall/syscall_darwin_arm64.go +++ b/src/syscall/syscall_darwin_arm64.go @@ -61,7 +61,4 @@ func libc_sendfile_trampoline() //go:cgo_import_dynamic libc_sendfile sendfile "/usr/lib/libSystem.B.dylib" -// Implemented in the runtime package (runtime/sys_darwin_64.go) -func syscallX(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) - func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) // sic diff --git a/src/syscall/syscall_js.go b/src/syscall/syscall_js.go index c320e34f260..bb8d70b73d3 100644 --- a/src/syscall/syscall_js.go +++ b/src/syscall/syscall_js.go @@ -8,8 +8,8 @@ package syscall import ( errorspkg "errors" - "internal/itoa" "internal/oserror" + "internal/strconv" "sync" "unsafe" ) @@ -62,7 +62,7 @@ func (e Errno) Error() string { return s } } - return "errno " + itoa.Itoa(int(e)) + return "errno " + strconv.Itoa(int(e)) } func (e Errno) Is(target error) bool { @@ -110,7 +110,7 @@ func (s Signal) String() string { return str } } - return "signal " + itoa.Itoa(int(s)) + return "signal " + strconv.Itoa(int(s)) } var signals = [...]string{} diff --git a/src/syscall/syscall_linux.go b/src/syscall/syscall_linux.go index ec9f771daad..9418bd84941 100644 --- a/src/syscall/syscall_linux.go +++ b/src/syscall/syscall_linux.go @@ -12,8 +12,8 @@ package syscall import ( - "internal/itoa" "internal/runtime/syscall/linux" + "internal/strconv" "runtime" "slices" "unsafe" @@ -361,7 +361,7 @@ func Futimesat(dirfd int, path string, tv []Timeval) (err error) { func Futimes(fd int, tv []Timeval) (err error) { // Believe it or not, this is the best we can do on Linux // (and is what glibc does). 
- return Utimes("/proc/self/fd/"+itoa.Itoa(fd), tv) + return Utimes("/proc/self/fd/"+strconv.Itoa(fd), tv) } const ImplementsGetwd = true diff --git a/src/syscall/syscall_unix.go b/src/syscall/syscall_unix.go index 7de2272b591..d957b77dc41 100644 --- a/src/syscall/syscall_unix.go +++ b/src/syscall/syscall_unix.go @@ -10,10 +10,10 @@ import ( errorspkg "errors" "internal/asan" "internal/bytealg" - "internal/itoa" "internal/msan" "internal/oserror" "internal/race" + "internal/strconv" "runtime" "sync" "unsafe" @@ -114,7 +114,7 @@ func (e Errno) Error() string { return s } } - return "errno " + itoa.Itoa(int(e)) + return "errno " + strconv.Itoa(int(e)) } func (e Errno) Is(target error) bool { @@ -176,7 +176,7 @@ func (s Signal) String() string { return str } } - return "signal " + itoa.Itoa(int(s)) + return "signal " + strconv.Itoa(int(s)) } func Read(fd int, p []byte) (n int, err error) { diff --git a/src/syscall/syscall_wasip1.go b/src/syscall/syscall_wasip1.go index c9225293a0c..9b5f502e30e 100644 --- a/src/syscall/syscall_wasip1.go +++ b/src/syscall/syscall_wasip1.go @@ -8,8 +8,8 @@ package syscall import ( "errors" - "internal/itoa" "internal/oserror" + "internal/strconv" "unsafe" ) @@ -71,7 +71,7 @@ func (e Errno) Error() string { return s } } - return "errno " + itoa.Itoa(int(e)) + return "errno " + strconv.Itoa(int(e)) } func (e Errno) Is(target error) bool { @@ -201,7 +201,7 @@ func (s Signal) String() string { case SIGSYS: return "bad system call" default: - return "signal " + itoa.Itoa(int(s)) + return "signal " + strconv.Itoa(int(s)) } } diff --git a/src/syscall/syscall_windows.go b/src/syscall/syscall_windows.go index f86f03e20f0..3e63897b6bc 100644 --- a/src/syscall/syscall_windows.go +++ b/src/syscall/syscall_windows.go @@ -10,10 +10,10 @@ import ( errorspkg "errors" "internal/asan" "internal/bytealg" - "internal/itoa" "internal/msan" "internal/oserror" "internal/race" + "internal/strconv" "sync" "unsafe" ) @@ -170,7 +170,7 @@ func (e Errno) error() string { if err != nil { n, err = formatMessage(flags, 0, uint32(e), 0, b, nil) if err != nil { - return "winapi error #" + itoa.Itoa(int(e)) + return "winapi error #" + strconv.Itoa(int(e)) } } // trim terminating \r and \n @@ -468,6 +468,14 @@ func Open(name string, flag int, perm uint32) (fd Handle, err error) { if flag&O_TRUNC == O_TRUNC && (createmode == OPEN_EXISTING || (createmode == OPEN_ALWAYS && err == ERROR_ALREADY_EXISTS)) { err = Ftruncate(h, 0) + if err == _ERROR_INVALID_PARAMETER { + // ERROR_INVALID_PARAMETER means truncation is not supported on this file handle. + // Unix's O_TRUNC specification says to ignore O_TRUNC on named pipes and terminal devices. + // We do the same here. + if t, err1 := GetFileType(h); err1 == nil && (t == FILE_TYPE_PIPE || t == FILE_TYPE_CHAR) { + err = nil + } + } if err != nil { CloseHandle(h) return InvalidHandle, err @@ -1358,7 +1366,7 @@ func (s Signal) String() string { return str } } - return "signal " + itoa.Itoa(int(s)) + return "signal " + strconv.Itoa(int(s)) } func LoadCreateSymbolicLink() error { diff --git a/src/syscall/types_windows.go b/src/syscall/types_windows.go index b40b455e7de..3c6d18a8509 100644 --- a/src/syscall/types_windows.go +++ b/src/syscall/types_windows.go @@ -34,6 +34,10 @@ const ( WSAECONNRESET Errno = 10054 ) +const ( + _ERROR_INVALID_PARAMETER Errno = 87 +) + const ( // Invented values to support what package os expects. 
O_RDONLY = 0x00000 diff --git a/src/syscall/zsyscall_darwin_amd64.go b/src/syscall/zsyscall_darwin_amd64.go index 8812fb12cd1..fe4a264ed25 100644 --- a/src/syscall/zsyscall_darwin_amd64.go +++ b/src/syscall/zsyscall_darwin_amd64.go @@ -2011,7 +2011,6 @@ func libc_fstatat64_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -//go:nosplit func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { if runtime.GOOS == "ios" { panic("unimplemented") diff --git a/src/syscall/zsyscall_darwin_arm64.go b/src/syscall/zsyscall_darwin_arm64.go index 22b096349d4..8fd7392d5b5 100644 --- a/src/syscall/zsyscall_darwin_arm64.go +++ b/src/syscall/zsyscall_darwin_arm64.go @@ -2011,7 +2011,6 @@ func libc_fstatat_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -//go:nosplit func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { if runtime.GOOS == "ios" { panic("unimplemented") diff --git a/src/syscall/zsyscall_openbsd_386.go b/src/syscall/zsyscall_openbsd_386.go index c8cf7f231b1..d914e19da0d 100644 --- a/src/syscall/zsyscall_openbsd_386.go +++ b/src/syscall/zsyscall_openbsd_386.go @@ -1839,7 +1839,6 @@ func libc_exit_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -//go:nosplit func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := syscall6(abi.FuncPCABI0(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/src/syscall/zsyscall_openbsd_amd64.go b/src/syscall/zsyscall_openbsd_amd64.go index 9188756a870..5efe4cd8686 100644 --- a/src/syscall/zsyscall_openbsd_amd64.go +++ b/src/syscall/zsyscall_openbsd_amd64.go @@ -1839,7 +1839,6 @@ func libc_exit_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -//go:nosplit func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := syscall6(abi.FuncPCABI0(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/src/syscall/zsyscall_openbsd_arm.go b/src/syscall/zsyscall_openbsd_arm.go index ecdfa636721..db8ea482ef5 100644 --- a/src/syscall/zsyscall_openbsd_arm.go +++ b/src/syscall/zsyscall_openbsd_arm.go @@ -1839,7 +1839,6 @@ func libc_exit_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -//go:nosplit func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := syscall6(abi.FuncPCABI0(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/src/syscall/zsyscall_openbsd_arm64.go b/src/syscall/zsyscall_openbsd_arm64.go index d28d3c5e1ea..673791c8241 100644 --- a/src/syscall/zsyscall_openbsd_arm64.go +++ b/src/syscall/zsyscall_openbsd_arm64.go @@ -1839,7 +1839,6 @@ func libc_exit_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -//go:nosplit func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := syscall6(abi.FuncPCABI0(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/src/syscall/zsyscall_openbsd_ppc64.go b/src/syscall/zsyscall_openbsd_ppc64.go index 0e6828bcafd..7e0dc88a4c1 100644 --- a/src/syscall/zsyscall_openbsd_ppc64.go +++ b/src/syscall/zsyscall_openbsd_ppc64.go @@ -1839,7 +1839,6 @@ func libc_exit_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -//go:nosplit func 
ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := syscall6(abi.FuncPCABI0(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/src/syscall/zsyscall_openbsd_riscv64.go b/src/syscall/zsyscall_openbsd_riscv64.go index 920147074d0..7b5a89ceac1 100644 --- a/src/syscall/zsyscall_openbsd_riscv64.go +++ b/src/syscall/zsyscall_openbsd_riscv64.go @@ -1839,7 +1839,6 @@ func libc_exit_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -//go:nosplit func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := syscall6(abi.FuncPCABI0(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/src/testing/flag_test.go b/src/testing/flag_test.go index 6a7754425d5..fb65e159281 100644 --- a/src/testing/flag_test.go +++ b/src/testing/flag_test.go @@ -25,7 +25,6 @@ func TestFlag(t *testing.T) { testenv.MustHaveExec(t) for _, flag := range []string{"", "-test.v", "-test.v=test2json"} { - flag := flag t.Run(flag, func(t *testing.T) { t.Parallel() cmd := exec.Command(testenv.Executable(t), "-test.run=^TestFlag$", "-test_flag_arg="+flag) diff --git a/src/testing/fstest/testfs.go b/src/testing/fstest/testfs.go index 1fb84b89284..72830a09a72 100644 --- a/src/testing/fstest/testfs.go +++ b/src/testing/fstest/testfs.go @@ -29,7 +29,7 @@ import ( // The contents of fsys must not change concurrently with TestFS. // // If TestFS finds any misbehaviors, it returns either the first error or a -// list of errors. Use [errors.Is] or [errors.As] to inspect. +// list of errors. Use [errors.Is] or [errors.AsType] to inspect. // // Typical usage inside a test is: // diff --git a/src/testing/fstest/testfs_test.go b/src/testing/fstest/testfs_test.go index d6d6d89b89f..a0561b6dfc6 100644 --- a/src/testing/fstest/testfs_test.go +++ b/src/testing/fstest/testfs_test.go @@ -105,8 +105,12 @@ func TestTestFSWrappedErrors(t *testing.T) { // TestFS is expected to return a list of errors. // Enforce that the list can be extracted for browsing. - var errs interface{ Unwrap() []error } - if !errors.As(err, &errs) { + type wrapper interface { + error + Unwrap() []error + } + errs, ok := errors.AsType[wrapper](err) + if !ok { t.Errorf("caller should be able to extract the errors as a list: %#v", err) } else { for _, err := range errs.Unwrap() { diff --git a/src/testing/internal/testdeps/deps.go b/src/testing/internal/testdeps/deps.go index 6f42d4722ca..5ab377daeb6 100644 --- a/src/testing/internal/testdeps/deps.go +++ b/src/testing/internal/testdeps/deps.go @@ -66,6 +66,12 @@ func (TestDeps) ImportPath() string { return ImportPath } +var ModulePath string + +func (TestDeps) ModulePath() string { + return ModulePath +} + // testLog implements testlog.Interface, logging actions by package os. 
type testLog struct { mu sync.Mutex diff --git a/src/testing/iotest/reader_test.go b/src/testing/iotest/reader_test.go index 1d222372caf..cecfbfce492 100644 --- a/src/testing/iotest/reader_test.go +++ b/src/testing/iotest/reader_test.go @@ -238,7 +238,6 @@ func TestErrReader(t *testing.T) { } for _, tt := range cases { - tt := tt t.Run(tt.name, func(t *testing.T) { n, err := ErrReader(tt.err).Read(nil) if err != tt.err { diff --git a/src/testing/panic_test.go b/src/testing/panic_test.go index fc84175ee6f..01a34b08019 100644 --- a/src/testing/panic_test.go +++ b/src/testing/panic_test.go @@ -198,7 +198,6 @@ func TestPanicHelper(t *testing.T) { } }) for i := 0; i < 3; i++ { - i := i t.Run(fmt.Sprintf("%v", i), func(t *testing.T) { chosen := t.Name() == *testPanicTest if chosen && *testPanicCleanup { diff --git a/src/testing/sub_test.go b/src/testing/sub_test.go index bb5586d9fcb..5d5573ccec5 100644 --- a/src/testing/sub_test.go +++ b/src/testing/sub_test.go @@ -988,7 +988,6 @@ func TestConcurrentCleanup(t *T) { var wg sync.WaitGroup wg.Add(2) for i := 0; i < 2; i++ { - i := i go func() { t.Cleanup(func() { // Although the calls to Cleanup are concurrent, the functions passed diff --git a/src/testing/synctest/run.go b/src/testing/synctest/run.go deleted file mode 100644 index 2e668ab8634..00000000000 --- a/src/testing/synctest/run.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.synctest - -package synctest - -import "internal/synctest" - -// Run is deprecated. -// -// Deprecated: Use Test instead. Run will be removed in Go 1.26. -func Run(f func()) { - synctest.Run(f) -} diff --git a/src/testing/testing.go b/src/testing/testing.go index 3f764465493..0d1d08ca89a 100644 --- a/src/testing/testing.go +++ b/src/testing/testing.go @@ -420,7 +420,6 @@ import ( "sync/atomic" "time" "unicode" - "unicode/utf8" _ "unsafe" // for linkname ) @@ -456,6 +455,7 @@ func Init() { // this flag lets "go test" tell the binary to write the files in the directory where // the "go test" command is run. outputDir = flag.String("test.outputdir", "", "write profiles to `dir`") + artifacts = flag.Bool("test.artifacts", false, "store test artifacts in test.outputdir") // Report as tests are run; default is silent for success. flag.Var(&chatty, "test.v", "verbose: print additional output") count = flag.Uint("test.count", 1, "run tests and benchmarks `n` times") @@ -489,6 +489,7 @@ var ( short *bool failFast *bool outputDir *string + artifacts *bool chatty chattyFlag count *uint coverProfile *string @@ -516,6 +517,7 @@ var ( cpuList []int testlogFile *os.File + artifactDir string numFailed atomic.Uint32 // number of test failures @@ -653,15 +655,17 @@ type common struct { runner string // Function name of tRunner running the test. isParallel bool // Whether the test is parallel. - parent *common - level int // Nesting depth of test or benchmark. - creator []uintptr // If level > 0, the stack trace at the point where the parent called t.Run. - name string // Name of test or benchmark. - start highPrecisionTime // Time test or benchmark started - duration time.Duration - barrier chan bool // To signal parallel subtests they may start. Nil when T.Parallel is not present (B) or not usable (when fuzzing). - signal chan bool // To signal a test is done. - sub []*T // Queue of subtests to be run in parallel. 
+ parent *common + level int // Nesting depth of test or benchmark. + creator []uintptr // If level > 0, the stack trace at the point where the parent called t.Run. + modulePath string + importPath string + name string // Name of test or benchmark. + start highPrecisionTime // Time test or benchmark started + duration time.Duration + barrier chan bool // To signal parallel subtests they may start. Nil when T.Parallel is not present (B) or not usable (when fuzzing). + signal chan bool // To signal a test is done. + sub []*T // Queue of subtests to be run in parallel. lastRaceErrors atomic.Int64 // Max value of race.Errors seen during the test or its subtests. raceErrorLogged atomic.Bool @@ -671,6 +675,10 @@ type common struct { tempDirErr error tempDirSeq int32 + artifactDirOnce sync.Once + artifactDir string + artifactDirErr error + ctx context.Context cancelCtx context.CancelFunc } @@ -879,6 +887,7 @@ func fmtDuration(d time.Duration) string { // TB is the interface common to [T], [B], and [F]. type TB interface { + ArtifactDir() string Attr(key, value string) Cleanup(func()) Error(args ...any) @@ -1313,6 +1322,96 @@ func (c *common) Cleanup(f func()) { c.cleanups = append(c.cleanups, fn) } +// ArtifactDir returns a directory in which the test should store output files. +// When the -artifacts flag is provided, this directory is located +// under the output directory. Otherwise, ArtifactDir returns a temporary directory +// that is removed after the test completes. +// +// Each test or subtest within each test package has a unique artifact directory. +// Repeated calls to ArtifactDir in the same test or subtest return the same directory. +// Subtest outputs are not located under the parent test's output directory. +func (c *common) ArtifactDir() string { + c.checkFuzzFn("ArtifactDir") + c.artifactDirOnce.Do(func() { + c.artifactDir, c.artifactDirErr = c.makeArtifactDir() + }) + if c.artifactDirErr != nil { + c.Fatalf("ArtifactDir: %v", c.artifactDirErr) + } + return c.artifactDir } + +func hashString(s string) (h uint64) { + // FNV, used here to avoid a dependency on maphash. + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= 1099511628211 + } + return +} + +// makeArtifactDir creates the artifact directory for a test. +// The artifact directory is: +// +// <outputdir>/_artifacts/<test package>/<test name>/<random> +// +// The test package is the package import path with the module name prefix removed. +// The test name is truncated if too long. +// Special characters are removed from the path. +func (c *common) makeArtifactDir() (string, error) { + if !*artifacts { + return c.makeTempDir() + } + + // If the test name is longer than maxNameSize, truncate it and replace the last + // hashSize bytes with a hash of the full name. + const maxNameSize = 64 + name := strings.ReplaceAll(c.name, "/", "__") + if len(name) > maxNameSize { + h := fmt.Sprintf("%0x", hashString(name)) + name = name[:maxNameSize-len(h)] + h + } + + // Remove the module path prefix from the import path. + pkg := strings.TrimPrefix(c.importPath, c.modulePath+"/") + + // Join with /, not filepath.Join: the import path is /-separated, + // and we don't want removeSymbolsExcept to strip \ separators on Windows. + base := "/" + pkg + "/" + name + base = removeSymbolsExcept(base, "!#$%&()+,-.=@^_{}~ /") + base, err := filepath.Localize(base) + if err != nil { + // This name can't be safely converted into a local filepath. + // Drop it and just use _artifacts/. 
+ base = "" + } + + artifactBase := filepath.Join(artifactDir, base) + if err := os.MkdirAll(artifactBase, 0o777); err != nil { + return "", err + } + dir, err := os.MkdirTemp(artifactBase, "") + if err != nil { + return "", err + } + if c.chatty != nil { + c.chatty.Updatef(c.name, "=== ARTIFACTS %s %v\n", c.name, dir) + } + return dir, nil +} + +func removeSymbolsExcept(s, allowed string) string { + mapper := func(r rune) rune { + if unicode.IsLetter(r) || + unicode.IsNumber(r) || + strings.ContainsRune(allowed, r) { + return r + } + return -1 // disallowed symbol + } + return strings.Map(mapper, s) +} + // TempDir returns a temporary directory for the test to use. // The directory is automatically removed when the test and // all its subtests complete. @@ -1322,6 +1421,14 @@ func (c *common) Cleanup(f func()) { // be created somewhere beneath it. func (c *common) TempDir() string { c.checkFuzzFn("TempDir") + dir, err := c.makeTempDir() + if err != nil { + c.Fatalf("TempDir: %v", err) + } + return dir +} + +func (c *common) makeTempDir() (string, error) { // Use a single parent directory for all the temporary directories // created by a test, each numbered sequentially. c.tempDirMu.Lock() @@ -1332,7 +1439,7 @@ func (c *common) TempDir() string { _, err := os.Stat(c.tempDir) nonExistent = os.IsNotExist(err) if err != nil && !nonExistent { - c.Fatalf("TempDir: %v", err) + return "", err } } @@ -1347,23 +1454,9 @@ func (c *common) TempDir() string { // Drop unusual characters (such as path separators or // characters interacting with globs) from the directory name to // avoid surprising os.MkdirTemp behavior. - mapper := func(r rune) rune { - if r < utf8.RuneSelf { - const allowed = "!#$%&()+,-.=@^_{}~ " - if '0' <= r && r <= '9' || - 'a' <= r && r <= 'z' || - 'A' <= r && r <= 'Z' { - return r - } - if strings.ContainsRune(allowed, r) { - return r - } - } else if unicode.IsLetter(r) || unicode.IsNumber(r) { - return r - } - return -1 - } - pattern = strings.Map(mapper, pattern) + const allowed = "!#$%&()+,-.=@^_{}~ " + pattern = removeSymbolsExcept(pattern, allowed) + c.tempDir, c.tempDirErr = os.MkdirTemp(os.Getenv("GOTMPDIR"), pattern) if c.tempDirErr == nil { c.Cleanup(func() { @@ -1381,14 +1474,14 @@ func (c *common) TempDir() string { c.tempDirMu.Unlock() if c.tempDirErr != nil { - c.Fatalf("TempDir: %v", c.tempDirErr) + return "", c.tempDirErr } dir := fmt.Sprintf("%s%c%03d", c.tempDir, os.PathSeparator, seq) if err := os.Mkdir(dir, 0o777); err != nil { - c.Fatalf("TempDir: %v", err) + return "", err } - return dir + return dir, nil } // removeAll is like os.RemoveAll, but retries Windows "Access is denied." 
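For a concrete sense of the API added above, here is a minimal sketch of a test that stores an artifact, mirroring the TestArtifactDirExample helper added to testing_test.go later in this change. Run with go test -artifacts (optionally -outputdir=DIR) and the file is kept under <outputdir>/_artifacts/<test package>/<test name>/; without the flag, ArtifactDir falls back to a temporary directory that is removed when the test completes:

	func TestSavesDiagnostics(t *testing.T) {
		// Stable across calls within one test; each subtest gets its own directory.
		dir := t.ArtifactDir()
		if err := os.WriteFile(filepath.Join(dir, "diag.txt"), []byte("details\n"), 0o666); err != nil {
			t.Fatal(err)
		}
	}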
@@ -1971,15 +2064,17 @@ func (t *T) Run(name string, f func(t *T)) bool { ctx, cancelCtx := context.WithCancel(context.Background()) t = &T{ common: common{ - barrier: make(chan bool), - signal: make(chan bool, 1), - name: testName, - parent: &t.common, - level: t.level + 1, - creator: pc[:n], - chatty: t.chatty, - ctx: ctx, - cancelCtx: cancelCtx, + barrier: make(chan bool), + signal: make(chan bool, 1), + name: testName, + modulePath: t.modulePath, + importPath: t.importPath, + parent: &t.common, + level: t.level + 1, + creator: pc[:n], + chatty: t.chatty, + ctx: ctx, + cancelCtx: cancelCtx, }, tstate: t.tstate, } @@ -2140,6 +2235,7 @@ func (f matchStringOnly) MatchString(pat, str string) (bool, error) { return f func (f matchStringOnly) StartCPUProfile(w io.Writer) error { return errMain } func (f matchStringOnly) StopCPUProfile() {} func (f matchStringOnly) WriteProfileTo(string, io.Writer, int) error { return errMain } +func (f matchStringOnly) ModulePath() string { return "" } func (f matchStringOnly) ImportPath() string { return "" } func (f matchStringOnly) StartTestLog(io.Writer) {} func (f matchStringOnly) StopTestLog() error { return errMain } @@ -2193,6 +2289,7 @@ type M struct { // testing/internal/testdeps's TestDeps. type testDeps interface { ImportPath() string + ModulePath() string MatchString(pat, str string) (bool, error) SetPanicOnExit0(bool) StartCPUProfile(io.Writer) error @@ -2336,7 +2433,7 @@ func (m *M) Run() (code int) { if !*isFuzzWorker { deadline := m.startAlarm() haveExamples = len(m.examples) > 0 - testRan, testOk := runTests(m.deps.MatchString, m.tests, deadline) + testRan, testOk := runTests(m.deps.ModulePath(), m.deps.ImportPath(), m.deps.MatchString, m.tests, deadline) fuzzTargetsRan, fuzzTargetsOk := runFuzzTests(m.deps, m.fuzzTargets, deadline) exampleRan, exampleOk := runExamples(m.deps.MatchString, m.examples) m.stopAlarm() @@ -2437,14 +2534,14 @@ func RunTests(matchString func(pat, str string) (bool, error), tests []InternalT if *timeout > 0 { deadline = time.Now().Add(*timeout) } - ran, ok := runTests(matchString, tests, deadline) + ran, ok := runTests("", "", matchString, tests, deadline) if !ran && !haveExamples { fmt.Fprintln(os.Stderr, "testing: warning: no tests to run") } return ok } -func runTests(matchString func(pat, str string) (bool, error), tests []InternalTest, deadline time.Time) (ran, ok bool) { +func runTests(modulePath, importPath string, matchString func(pat, str string) (bool, error), tests []InternalTest, deadline time.Time) (ran, ok bool) { ok = true for _, procs := range cpuList { runtime.GOMAXPROCS(procs) @@ -2463,11 +2560,13 @@ func runTests(matchString func(pat, str string) (bool, error), tests []InternalT tstate.deadline = deadline t := &T{ common: common{ - signal: make(chan bool, 1), - barrier: make(chan bool), - w: os.Stdout, - ctx: ctx, - cancelCtx: cancelCtx, + signal: make(chan bool, 1), + barrier: make(chan bool), + w: os.Stdout, + ctx: ctx, + cancelCtx: cancelCtx, + modulePath: modulePath, + importPath: importPath, }, tstate: tstate, } @@ -2536,6 +2635,18 @@ func (m *M) before() { fmt.Fprintf(os.Stderr, "testing: cannot use -test.gocoverdir because test binary was not built with coverage enabled\n") os.Exit(2) } + if *artifacts { + var err error + artifactDir, err = filepath.Abs(toOutputDir("_artifacts")) + if err != nil { + fmt.Fprintf(os.Stderr, "testing: cannot make -test.outputdir absolute: %v\n", err) + os.Exit(2) + } + if err := os.Mkdir(artifactDir, 0o777); err != nil && !errors.Is(err, os.ErrExist) { + 
fmt.Fprintf(os.Stderr, "testing: %v\n", err) + os.Exit(2) + } + } if *testlog != "" { // Note: Not using toOutputDir. // This file is for use by cmd/go, not users. diff --git a/src/testing/testing_test.go b/src/testing/testing_test.go index cc89e4144e6..167f4a0b457 100644 --- a/src/testing/testing_test.go +++ b/src/testing/testing_test.go @@ -469,7 +469,7 @@ func TestTesting(t *testing.T) { // runTest runs a helper test with -test.v, ignoring its exit status. // runTest both logs and returns the test output. -func runTest(t *testing.T, test string) []byte { +func runTest(t *testing.T, test string, args ...string) []byte { t.Helper() testenv.MustHaveExec(t) @@ -477,6 +477,7 @@ func runTest(t *testing.T, test string) []byte { cmd := testenv.Command(t, testenv.Executable(t), "-test.run=^"+test+"$", "-test.bench="+test, "-test.v", "-test.parallel=2", "-test.benchtime=2x") cmd = testenv.CleanCmdEnv(cmd) cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1") + cmd.Args = append(cmd.Args, args...) out, err := cmd.CombinedOutput() t.Logf("%v: %v\n%s", cmd, err, out) @@ -1055,6 +1056,105 @@ func TestAttrInvalid(t *testing.T) { } } +const artifactContent = "It belongs in a museum.\n" + +func TestArtifactDirExample(t *testing.T) { + os.WriteFile(filepath.Join(t.ArtifactDir(), "artifact"), []byte(artifactContent), 0o666) +} + +func TestArtifactDirDefault(t *testing.T) { + tempDir := t.TempDir() + t.Chdir(tempDir) + out := runTest(t, "TestArtifactDirExample", "-test.artifacts") + checkArtifactDir(t, out, "TestArtifactDirExample", tempDir) +} + +func TestArtifactDirSpecified(t *testing.T) { + tempDir := t.TempDir() + out := runTest(t, "TestArtifactDirExample", "-test.artifacts", "-test.outputdir="+tempDir) + checkArtifactDir(t, out, "TestArtifactDirExample", tempDir) +} + +func TestArtifactDirNoArtifacts(t *testing.T) { + t.Chdir(t.TempDir()) + out := string(runTest(t, "TestArtifactDirExample")) + if strings.Contains(out, "=== ARTIFACTS") { + t.Errorf("expected output with no === ARTIFACTS, got\n%q", out) + } + ents, err := os.ReadDir(".") + if err != nil { + t.Fatal(err) + } + for _, e := range ents { + t.Errorf("unexpected file in current directory after test: %v", e.Name()) + } +} + +func TestArtifactDirSubtestExample(t *testing.T) { + t.Run("Subtest", func(t *testing.T) { + os.WriteFile(filepath.Join(t.ArtifactDir(), "artifact"), []byte(artifactContent), 0o666) + }) +} + +func TestArtifactDirInSubtest(t *testing.T) { + tempDir := t.TempDir() + out := runTest(t, "TestArtifactDirSubtestExample/Subtest", "-test.artifacts", "-test.outputdir="+tempDir) + checkArtifactDir(t, out, "TestArtifactDirSubtestExample/Subtest", tempDir) +} + +func TestArtifactDirLongTestNameExample(t *testing.T) { + name := strings.Repeat("x", 256) + t.Run(name, func(t *testing.T) { + os.WriteFile(filepath.Join(t.ArtifactDir(), "artifact"), []byte(artifactContent), 0o666) + }) +} + +func TestArtifactDirWithLongTestName(t *testing.T) { + tempDir := t.TempDir() + out := runTest(t, "TestArtifactDirLongTestNameExample", "-test.artifacts", "-test.outputdir="+tempDir) + checkArtifactDir(t, out, `TestArtifactDirLongTestNameExample/\w+`, tempDir) +} + +func TestArtifactDirConsistent(t *testing.T) { + a := t.ArtifactDir() + b := t.ArtifactDir() + if a != b { + t.Errorf("t.ArtifactDir is not consistent between calls: %q, %q", a, b) + } +} + +func checkArtifactDir(t *testing.T, out []byte, testName, outputDir string) { + t.Helper() + + re := regexp.MustCompile(`=== ARTIFACTS ` + testName + ` ([^\n]+)`) + match := re.FindSubmatch(out) + if 
match == nil { + t.Fatalf("expected output matching %q, got\n%q", re, out) + } + artifactDir := string(match[1]) + + // Verify that the artifact directory is contained in the expected output directory. + relDir, err := filepath.Rel(outputDir, artifactDir) + if err != nil { + t.Fatal(err) + } + if !filepath.IsLocal(relDir) { + t.Fatalf("want artifact directory contained in %q, got %q", outputDir, artifactDir) + } + + for _, part := range strings.Split(relDir, string(os.PathSeparator)) { + const maxSize = 64 + if len(part) > maxSize { + t.Errorf("artifact directory %q contains component >%v characters long: %q", relDir, maxSize, part) + } + } + + got, err := os.ReadFile(filepath.Join(artifactDir, "artifact")) + if err != nil || string(got) != artifactContent { + t.Errorf("reading artifact in %q: got %q, %v; want %q", artifactDir, got, err, artifactContent) + } +} + func TestBenchmarkBLoopIterationCorrect(t *testing.T) { out := runTest(t, "BenchmarkBLoopPrint") c := bytes.Count(out, []byte("Printing from BenchmarkBLoopPrint")) @@ -1110,3 +1210,7 @@ func BenchmarkBNPrint(b *testing.B) { b.Logf("Printing from BenchmarkBNPrint") } } + +func TestArtifactDir(t *testing.T) { + t.Log(t.ArtifactDir()) +} diff --git a/src/text/template/exec_test.go b/src/text/template/exec_test.go index 65440901a0b..8665f3ad498 100644 --- a/src/text/template/exec_test.go +++ b/src/text/template/exec_test.go @@ -1015,8 +1015,7 @@ func TestExecError_CustomError(t *testing.T) { var b bytes.Buffer err := tmpl.Execute(&b, nil) - var e *CustomError - if !errors.As(err, &e) { + if _, ok := errors.AsType[*CustomError](err); !ok { t.Fatalf("expected custom error; got %s", err) } } diff --git a/src/text/template/funcs.go b/src/text/template/funcs.go index c28c3ea2002..30b3243a5a8 100644 --- a/src/text/template/funcs.go +++ b/src/text/template/funcs.go @@ -22,7 +22,7 @@ import ( // return value evaluates to non-nil during execution, execution terminates and // Execute returns that error. // -// Errors returned by Execute wrap the underlying error; call [errors.As] to +// Errors returned by Execute wrap the underlying error; call [errors.AsType] to // unwrap them. // // When template execution invokes a function with an argument list, that list diff --git a/src/text/template/parse/parse.go b/src/text/template/parse/parse.go index 84d639d78d1..b74dfb7f4ea 100644 --- a/src/text/template/parse/parse.go +++ b/src/text/template/parse/parse.go @@ -35,7 +35,7 @@ type Tree struct { stackDepth int // depth of nested parenthesized expressions } -// A mode value is a set of flags (or 0). Modes control parser behavior. +// A Mode value is a set of flags (or 0). Modes control parser behavior. 
type Mode uint const ( diff --git a/src/time/sleep_test.go b/src/time/sleep_test.go index b9e81b98feb..c87f420f8f3 100644 --- a/src/time/sleep_test.go +++ b/src/time/sleep_test.go @@ -937,7 +937,6 @@ func BenchmarkParallelTimerLatency(b *testing.B) { wg.Add(timerCount) atomic.StoreInt32(&count, 0) for j := 0; j < timerCount; j++ { - j := j expectedWakeup := Now().Add(delay) AfterFunc(delay, func() { late := Since(expectedWakeup) @@ -1011,7 +1010,6 @@ func BenchmarkStaggeredTickerLatency(b *testing.B) { var wg sync.WaitGroup wg.Add(tickerCount) for j := 0; j < tickerCount; j++ { - j := j doWork(delay / Duration(gmp)) expectedWakeup := Now().Add(delay) ticker := NewTicker(delay) diff --git a/src/time/time.go b/src/time/time.go index cf9abc7196f..32a1ce4307d 100644 --- a/src/time/time.go +++ b/src/time/time.go @@ -260,7 +260,11 @@ func (t *Time) mono() int64 { // IsZero reports whether t represents the zero time instant, // January 1, year 1, 00:00:00 UTC. func (t Time) IsZero() bool { - return t.sec() == 0 && t.nsec() == 0 + // If hasMonotonic is set in t.wall, then the time can't be before 1885, so it can't be the year 1. + // If hasMonotonic is zero, then all the bits in wall other than the nanoseconds field should be 0. + // So if there are no nanoseconds then t.wall == 0, and if there are no seconds then t.ext == 0. + // This is equivalent to t.sec() == 0 && t.nsec() == 0, but is more efficient. + return t.wall == 0 && t.ext == 0 } // After reports whether the time instant t is after u. diff --git a/src/time/zoneinfo_js.go b/src/time/zoneinfo_js.go index 8da34a21fba..11d944a9049 100644 --- a/src/time/zoneinfo_js.go +++ b/src/time/zoneinfo_js.go @@ -7,7 +7,7 @@ package time import ( - "internal/itoa" + "internal/strconv" "syscall/js" ) @@ -36,10 +36,10 @@ func initLocal() { } else { z.name += "+" } - z.name += itoa.Itoa(offset / 60) + z.name += strconv.Itoa(offset / 60) min := offset % 60 if min != 0 { - z.name += ":" + itoa.Itoa(min) + z.name += ":" + strconv.Itoa(min) } localLoc.zone = []zone{z} } diff --git a/src/unique/canonmap.go b/src/unique/canonmap.go index a3494eef997..74c95a12af9 100644 --- a/src/unique/canonmap.go +++ b/src/unique/canonmap.go @@ -232,7 +232,7 @@ func (m *canonMap[T]) cleanup(hash uintptr, wp weak.Pointer[T]) { // which requires the parents' lock. for i.parent != nil && i.empty() { if hashShift == 8*goarch.PtrSize { - panic("internal/sync.HashTrieMap: ran out of hash bits while iterating") + panic("unique.canonMap: ran out of hash bits while iterating") } hashShift += nChildrenLog2 diff --git a/src/vendor/golang.org/x/text/unicode/bidi/core.go b/src/vendor/golang.org/x/text/unicode/bidi/core.go index 9d2ae547b5e..fb8273236dd 100644 --- a/src/vendor/golang.org/x/text/unicode/bidi/core.go +++ b/src/vendor/golang.org/x/text/unicode/bidi/core.go @@ -427,13 +427,6 @@ type isolatingRunSequence struct { func (i *isolatingRunSequence) Len() int { return len(i.indexes) } -func maxLevel(a, b level) level { - if a > b { - return a - } - return b -} - // Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types, // either L or R, for each isolating run sequence. 
func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { @@ -474,8 +467,8 @@ func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { indexes: indexes, types: types, level: level, - sos: typeForLevel(maxLevel(prevLevel, level)), - eos: typeForLevel(maxLevel(succLevel, level)), + sos: typeForLevel(max(prevLevel, level)), + eos: typeForLevel(max(succLevel, level)), } } diff --git a/src/vendor/modules.txt b/src/vendor/modules.txt index a2a0c0b3e85..f1e33686ed0 100644 --- a/src/vendor/modules.txt +++ b/src/vendor/modules.txt @@ -1,4 +1,4 @@ -# golang.org/x/crypto v0.42.0 +# golang.org/x/crypto v0.43.0 ## explicit; go 1.24.0 golang.org/x/crypto/chacha20 golang.org/x/crypto/chacha20poly1305 @@ -6,7 +6,7 @@ golang.org/x/crypto/cryptobyte golang.org/x/crypto/cryptobyte/asn1 golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 -# golang.org/x/net v0.44.1-0.20251002015445-edb764c2296f +# golang.org/x/net v0.46.0 ## explicit; go 1.24.0 golang.org/x/net/dns/dnsmessage golang.org/x/net/http/httpguts @@ -15,10 +15,10 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/lif golang.org/x/net/nettest -# golang.org/x/sys v0.36.0 +# golang.org/x/sys v0.37.0 ## explicit; go 1.24.0 golang.org/x/sys/cpu -# golang.org/x/text v0.29.0 +# golang.org/x/text v0.30.0 ## explicit; go 1.24.0 golang.org/x/text/secure/bidirule golang.org/x/text/transform diff --git a/test/checkbce.go b/test/checkbce.go index 49d047443e0..51c95466e88 100644 --- a/test/checkbce.go +++ b/test/checkbce.go @@ -26,7 +26,7 @@ func f1(a [256]int, i int) { var j int useInt(a[i]) // ERROR "Found IsInBounds$" j = i % 256 - useInt(a[j]) // ERROR "Found IsInBounds$" + useInt(a[j]) j = i & 255 useInt(a[j]) j = i & 17 diff --git a/test/codegen/addrcalc.go b/test/codegen/addrcalc.go index 45552d278cd..9dddb1ba946 100644 --- a/test/codegen/addrcalc.go +++ b/test/codegen/addrcalc.go @@ -9,6 +9,6 @@ package codegen // Make sure we use ADDQ instead of LEAQ when we can. 
func f(p *[4][2]int, x int) *int { - // amd64:"ADDQ",-"LEAQ" + // amd64:"ADDQ" -"LEAQ" return &p[x][0] } diff --git a/test/codegen/alloc.go b/test/codegen/alloc.go index c6ff004eed3..20ccc853c63 100644 --- a/test/codegen/alloc.go +++ b/test/codegen/alloc.go @@ -10,28 +10,28 @@ package codegen func zeroAllocNew1() *struct{} { - // 386:-`CALL\truntime\.newobject`, `LEAL\truntime.zerobase` - // amd64:-`CALL\truntime\.newobject`, `LEAQ\truntime.zerobase` - // arm:-`CALL\truntime\.newobject`, `MOVW\t[$]runtime.zerobase` - // arm64:-`CALL\truntime\.newobject`, `MOVD\t[$]runtime.zerobase` - // riscv64:-`CALL\truntime\.newobject`, `MOV\t[$]runtime.zerobase` + // 386:-`CALL runtime\.newobject`, `LEAL runtime.zerobase` + // amd64:-`CALL runtime\.newobject`, `LEAQ runtime.zerobase` + // arm:-`CALL runtime\.newobject`, `MOVW [$]runtime.zerobase` + // arm64:-`CALL runtime\.newobject`, `MOVD [$]runtime.zerobase` + // riscv64:-`CALL runtime\.newobject`, `MOV [$]runtime.zerobase` return new(struct{}) } func zeroAllocNew2() *[0]int { - // 386:-`CALL\truntime\.newobject`, `LEAL\truntime.zerobase` - // amd64:-`CALL\truntime\.newobject`, `LEAQ\truntime.zerobase` - // arm:-`CALL\truntime\.newobject`, `MOVW\t[$]runtime.zerobase` - // arm64:-`CALL\truntime\.newobject`, `MOVD\t[$]runtime.zerobase` - // riscv64:-`CALL\truntime\.newobject`, `MOV\t[$]runtime.zerobase` + // 386:-`CALL runtime\.newobject`, `LEAL runtime.zerobase` + // amd64:-`CALL runtime\.newobject`, `LEAQ runtime.zerobase` + // arm:-`CALL runtime\.newobject`, `MOVW [$]runtime.zerobase` + // arm64:-`CALL runtime\.newobject`, `MOVD [$]runtime.zerobase` + // riscv64:-`CALL runtime\.newobject`, `MOV [$]runtime.zerobase` return new([0]int) } func zeroAllocSliceLit() []int { - // 386:-`CALL\truntime\.newobject`, `LEAL\truntime.zerobase` - // amd64:-`CALL\truntime\.newobject`, `LEAQ\truntime.zerobase` - // arm:-`CALL\truntime\.newobject`, `MOVW\t[$]runtime.zerobase` - // arm64:-`CALL\truntime\.newobject`, `MOVD\t[$]runtime.zerobase` - // riscv64:-`CALL\truntime\.newobject`, `MOV\t[$]runtime.zerobase` + // 386:-`CALL runtime\.newobject`, `LEAL runtime.zerobase` + // amd64:-`CALL runtime\.newobject`, `LEAQ runtime.zerobase` + // arm:-`CALL runtime\.newobject`, `MOVW [$]runtime.zerobase` + // arm64:-`CALL runtime\.newobject`, `MOVD [$]runtime.zerobase` + // riscv64:-`CALL runtime\.newobject`, `MOV [$]runtime.zerobase` return []int{} } diff --git a/test/codegen/arithmetic.go b/test/codegen/arithmetic.go index 7055db3dc9f..42d5d2ef658 100644 --- a/test/codegen/arithmetic.go +++ b/test/codegen/arithmetic.go @@ -10,55 +10,51 @@ package codegen // simplifications and optimizations on integer types. // For codegen tests on float types, see floats.go. 
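A note on the comment syntax being rewritten throughout the codegen tests that follow: each // arch:"re" line is an asmcheck assertion that the regexp matches the assembly the compiler generates for the enclosing function, and a leading - demands that the pattern does not match. This commit mechanically drops the \t escapes from the patterns and, in most files, switches the comma separators between checks to plain spaces; the assumption behind the rewrite is that the asmcheck harness now accepts both separators and treats a literal space in a pattern as matching any run of whitespace, so a check taken from this diff such as

	func AddMul(x int) int {
		// amd64:"LEAQ 1"
		return 2*x + 1
	}

still matches the tab-separated "LEAQ	1(AX)(AX*1), AX" instruction in the compiler output.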
-// ----------------- // -// Addition // -// ----------------- // +// Addition func AddLargeConst(a uint64, out []uint64) { - // ppc64x/power10:"ADD\t[$]4294967296," - // ppc64x/power9:"MOVD\t[$]1", "SLD\t[$]32" "ADD\tR[0-9]*" - // ppc64x/power8:"MOVD\t[$]1", "SLD\t[$]32" "ADD\tR[0-9]*" + // ppc64x/power10:"ADD [$]4294967296," + // ppc64x/power9:"MOVD [$]1", "SLD [$]32" "ADD R[0-9]*" + // ppc64x/power8:"MOVD [$]1", "SLD [$]32" "ADD R[0-9]*" out[0] = a + 0x100000000 - // ppc64x/power10:"ADD\t[$]-8589934592," - // ppc64x/power9:"MOVD\t[$]-1", "SLD\t[$]33" "ADD\tR[0-9]*" - // ppc64x/power8:"MOVD\t[$]-1", "SLD\t[$]33" "ADD\tR[0-9]*" + // ppc64x/power10:"ADD [$]-8589934592," + // ppc64x/power9:"MOVD [$]-1", "SLD [$]33" "ADD R[0-9]*" + // ppc64x/power8:"MOVD [$]-1", "SLD [$]33" "ADD R[0-9]*" out[1] = a + 0xFFFFFFFE00000000 - // ppc64x/power10:"ADD\t[$]1234567," - // ppc64x/power9:"ADDIS\t[$]19,", "ADD\t[$]-10617," - // ppc64x/power8:"ADDIS\t[$]19,", "ADD\t[$]-10617," + // ppc64x/power10:"ADD [$]1234567," + // ppc64x/power9:"ADDIS [$]19,", "ADD [$]-10617," + // ppc64x/power8:"ADDIS [$]19,", "ADD [$]-10617," out[2] = a + 1234567 - // ppc64x/power10:"ADD\t[$]-1234567," - // ppc64x/power9:"ADDIS\t[$]-19,", "ADD\t[$]10617," - // ppc64x/power8:"ADDIS\t[$]-19,", "ADD\t[$]10617," + // ppc64x/power10:"ADD [$]-1234567," + // ppc64x/power9:"ADDIS [$]-19,", "ADD [$]10617," + // ppc64x/power8:"ADDIS [$]-19,", "ADD [$]10617," out[3] = a - 1234567 - // ppc64x/power10:"ADD\t[$]2147450879," - // ppc64x/power9:"ADDIS\t[$]32767,", "ADD\t[$]32767," - // ppc64x/power8:"ADDIS\t[$]32767,", "ADD\t[$]32767," + // ppc64x/power10:"ADD [$]2147450879," + // ppc64x/power9:"ADDIS [$]32767,", "ADD [$]32767," + // ppc64x/power8:"ADDIS [$]32767,", "ADD [$]32767," out[4] = a + 0x7FFF7FFF - // ppc64x/power10:"ADD\t[$]-2147483647," - // ppc64x/power9:"ADDIS\t[$]-32768,", "ADD\t[$]1," - // ppc64x/power8:"ADDIS\t[$]-32768,", "ADD\t[$]1," + // ppc64x/power10:"ADD [$]-2147483647," + // ppc64x/power9:"ADDIS [$]-32768,", "ADD [$]1," + // ppc64x/power8:"ADDIS [$]-32768,", "ADD [$]1," out[5] = a - 2147483647 - // ppc64x:"ADDIS\t[$]-32768,", ^"ADD\t" + // ppc64x:"ADDIS [$]-32768,", ^"ADD " out[6] = a - 2147483648 - // ppc64x:"ADD\t[$]2147450880,", ^"ADDIS\t" + // ppc64x:"ADD [$]2147450880,", ^"ADDIS " out[7] = a + 0x7FFF8000 - // ppc64x:"ADD\t[$]-32768,", ^"ADDIS\t" + // ppc64x:"ADD [$]-32768,", ^"ADDIS " out[8] = a - 32768 - // ppc64x/power10:"ADD\t[$]-32769," - // ppc64x/power9:"ADDIS\t[$]-1,", "ADD\t[$]32767," - // ppc64x/power8:"ADDIS\t[$]-1,", "ADD\t[$]32767," + // ppc64x/power10:"ADD [$]-32769," + // ppc64x/power9:"ADDIS [$]-1,", "ADD [$]32767," + // ppc64x/power8:"ADDIS [$]-1,", "ADD [$]32767," out[9] = a - 32769 } func AddLargeConst2(a int, out []int) { - // loong64: -"ADDVU","ADDV16" + // loong64: -"ADDVU" "ADDV16" out[0] = a + 0x10000 } -// ----------------- // -// Subtraction // -// ----------------- // +// Subtraction var ef int @@ -83,65 +79,65 @@ func SubMem(arr []int, b, c, d int) int { arr[b]-- // amd64:`DECQ\s64\([A-Z]+\)` arr[8]-- - // 386:"SUBL\t4" - // amd64:"SUBQ\t8" + // 386:"SUBL 4" + // amd64:"SUBQ 8" return arr[0] - arr[1] } func SubFromConst(a int) int { - // ppc64x: `SUBC\tR[0-9]+,\s[$]40,\sR` - // riscv64: "ADDI\t\\$-40","NEG" + // ppc64x: `SUBC R[0-9]+,\s[$]40,\sR` + // riscv64: "ADDI [$]-40" "NEG" b := 40 - a return b } func SubFromConstNeg(a int) int { - // arm64: "ADD\t\\$40" - // loong64: "ADDV[U]\t\\$40" - // mips: "ADD[U]\t\\$40" - // mips64: "ADDV[U]\t\\$40" - // ppc64x: `ADD\t[$]40,\sR[0-9]+,\sR` - // 
riscv64: "ADDI\t\\$40",-"NEG" + // arm64: "ADD [$]40" + // loong64: "ADDV[U] [$]40" + // mips: "ADD[U] [$]40" + // mips64: "ADDV[U] [$]40" + // ppc64x: `ADD [$]40,\sR[0-9]+,\sR` + // riscv64: "ADDI [$]40" -"NEG" c := 40 - (-a) return c } func SubSubFromConst(a int) int { - // arm64: "ADD\t\\$20" - // loong64: "ADDV[U]\t\\$20" - // mips: "ADD[U]\t\\$20" - // mips64: "ADDV[U]\t\\$20" - // ppc64x: `ADD\t[$]20,\sR[0-9]+,\sR` - // riscv64: "ADDI\t\\$20",-"NEG" + // arm64: "ADD [$]20" + // loong64: "ADDV[U] [$]20" + // mips: "ADD[U] [$]20" + // mips64: "ADDV[U] [$]20" + // ppc64x: `ADD [$]20,\sR[0-9]+,\sR` + // riscv64: "ADDI [$]20" -"NEG" c := 40 - (20 - a) return c } func AddSubFromConst(a int) int { - // ppc64x: `SUBC\tR[0-9]+,\s[$]60,\sR` - // riscv64: "ADDI\t\\$-60","NEG" + // ppc64x: `SUBC R[0-9]+,\s[$]60,\sR` + // riscv64: "ADDI [$]-60" "NEG" c := 40 + (20 - a) return c } func NegSubFromConst(a int) int { - // arm64: "SUB\t\\$20" - // loong64: "ADDV[U]\t\\$-20" - // mips: "ADD[U]\t\\$-20" - // mips64: "ADDV[U]\t\\$-20" - // ppc64x: `ADD\t[$]-20,\sR[0-9]+,\sR` - // riscv64: "ADDI\t\\$-20" + // arm64: "SUB [$]20" + // loong64: "ADDV[U] [$]-20" + // mips: "ADD[U] [$]-20" + // mips64: "ADDV[U] [$]-20" + // ppc64x: `ADD [$]-20,\sR[0-9]+,\sR` + // riscv64: "ADDI [$]-20" c := -(20 - a) return c } func NegAddFromConstNeg(a int) int { - // arm64: "SUB\t\\$40","NEG" - // loong64: "ADDV[U]\t\\$-40","SUBV" - // mips: "ADD[U]\t\\$-40","SUB" - // mips64: "ADDV[U]\t\\$-40","SUBV" - // ppc64x: `SUBC\tR[0-9]+,\s[$]40,\sR` - // riscv64: "ADDI\t\\$-40","NEG" + // arm64: "SUB [$]40" "NEG" + // loong64: "ADDV[U] [$]-40" "SUBV" + // mips: "ADD[U] [$]-40" "SUB" + // mips64: "ADDV[U] [$]-40" "SUBV" + // ppc64x: `SUBC R[0-9]+,\s[$]40,\sR` + // riscv64: "ADDI [$]-40" "NEG" c := -(-40 + a) return c } @@ -153,19 +149,19 @@ func SubSubNegSimplify(a, b int) int { // mips:"SUB" // mips64:"SUBV" // ppc64x:"NEG" - // riscv64:"NEG",-"SUB" + // riscv64:"NEG" -"SUB" r := (a - b) - a return r } func SubAddSimplify(a, b int) int { - // amd64:-"SUBQ",-"ADDQ" - // arm64:-"SUB",-"ADD" - // loong64:-"SUBV",-"ADDV" - // mips:-"SUB",-"ADD" - // mips64:-"SUBV",-"ADDV" - // ppc64x:-"SUB",-"ADD" - // riscv64:-"SUB",-"ADD" + // amd64:-"SUBQ" -"ADDQ" + // arm64:-"SUB" -"ADD" + // loong64:-"SUBV" -"ADDV" + // mips:-"SUB" -"ADD" + // mips64:-"SUBV" -"ADDV" + // ppc64x:-"SUB" -"ADD" + // riscv64:-"SUB" -"ADD" r := a + (b - a) return r } @@ -173,9 +169,9 @@ func SubAddSimplify(a, b int) int { func SubAddSimplify2(a, b, c int) (int, int, int, int, int, int) { // amd64:-"ADDQ" // arm64:-"ADD" - // mips:"SUB",-"ADD" - // mips64:"SUBV",-"ADDV" - // loong64:"SUBV",-"ADDV" + // mips:"SUB" -"ADD" + // mips64:"SUBV" -"ADDV" + // loong64:"SUBV" -"ADDV" r := (a + b) - (a + c) // amd64:-"ADDQ" r1 := (a + b) - (c + a) @@ -185,9 +181,9 @@ func SubAddSimplify2(a, b, c int) (int, int, int, int, int, int) { r3 := (b + a) - (c + a) // amd64:-"SUBQ" // arm64:-"SUB" - // mips:"ADD",-"SUB" - // mips64:"ADDV",-"SUBV" - // loong64:"ADDV",-"SUBV" + // mips:"ADD" -"SUB" + // mips64:"ADDV" -"SUBV" + // loong64:"ADDV" -"SUBV" r4 := (a - c) + (c + b) // amd64:-"SUBQ" r5 := (a - c) + (b + c) @@ -195,31 +191,31 @@ func SubAddSimplify2(a, b, c int) (int, int, int, int, int, int) { } func SubAddNegSimplify(a, b int) int { - // amd64:"NEGQ",-"ADDQ",-"SUBQ" - // arm64:"NEG",-"ADD",-"SUB" - // loong64:"SUBV",-"ADDV" - // mips:"SUB",-"ADD" - // mips64:"SUBV",-"ADDV" - // ppc64x:"NEG",-"ADD",-"SUB" - // riscv64:"NEG",-"ADD",-"SUB" + // amd64:"NEGQ" -"ADDQ" -"SUBQ" + // 
arm64:"NEG" -"ADD" -"SUB" + // loong64:"SUBV" -"ADDV" + // mips:"SUB" -"ADD" + // mips64:"SUBV" -"ADDV" + // ppc64x:"NEG" -"ADD" -"SUB" + // riscv64:"NEG" -"ADD" -"SUB" r := a - (b + a) return r } func AddAddSubSimplify(a, b, c int) int { // amd64:-"SUBQ" - // arm64:"ADD",-"SUB" - // loong64:"ADDV",-"SUBV" - // mips:"ADD",-"SUB" - // mips64:"ADDV",-"SUBV" + // arm64:"ADD" -"SUB" + // loong64:"ADDV" -"SUBV" + // mips:"ADD" -"SUB" + // mips64:"ADDV" -"SUBV" // ppc64x:-"SUB" - // riscv64:"ADD","ADD",-"SUB" + // riscv64:"ADD" "ADD" -"SUB" r := a + (b + (c - a)) return r } func NegToInt32(a int) int { - // riscv64: "NEGW",-"MOVW" + // riscv64: "NEGW" -"MOVW" r := int(int32(-a)) return r } @@ -229,20 +225,20 @@ func NegToInt32(a int) int { // -------------------- // func Pow2Muls(n1, n2 int) (int, int) { - // amd64:"SHLQ\t[$]5",-"IMULQ" - // 386:"SHLL\t[$]5",-"IMULL" - // arm:"SLL\t[$]5",-"MUL" - // arm64:"LSL\t[$]5",-"MUL" - // loong64:"SLLV\t[$]5",-"MULV" - // ppc64x:"SLD\t[$]5",-"MUL" + // amd64:"SHLQ [$]5" -"IMULQ" + // 386:"SHLL [$]5" -"IMULL" + // arm:"SLL [$]5" -"MUL" + // arm64:"LSL [$]5" -"MUL" + // loong64:"SLLV [$]5" -"MULV" + // ppc64x:"SLD [$]5" -"MUL" a := n1 * 32 - // amd64:"SHLQ\t[$]6",-"IMULQ" - // 386:"SHLL\t[$]6",-"IMULL" - // arm:"SLL\t[$]6",-"MUL" + // amd64:"SHLQ [$]6" -"IMULQ" + // 386:"SHLL [$]6" -"IMULL" + // arm:"SLL [$]6" -"MUL" // arm64:`NEG\sR[0-9]+<<6,\sR[0-9]+`,-`LSL`,-`MUL` - // loong64:"SLLV\t[$]6",-"MULV" - // ppc64x:"SLD\t[$]6","NEG\\sR[0-9]+,\\sR[0-9]+",-"MUL" + // loong64:"SLLV [$]6" -"MULV" + // ppc64x:"SLD [$]6" "NEG\\sR[0-9]+,\\sR[0-9]+" -"MUL" b := -64 * n2 return a, b @@ -258,18 +254,18 @@ func Mul_2(n1 int32, n2 int64) (int32, int64) { } func Mul_96(n int) int { - // amd64:`SHLQ\t[$]5`,`LEAQ\t\(.*\)\(.*\*2\),`,-`IMULQ` - // 386:`SHLL\t[$]5`,`LEAL\t\(.*\)\(.*\*2\),`,-`IMULL` - // arm64:`LSL\t[$]5`,`ADD\sR[0-9]+<<1,\sR[0-9]+`,-`MUL` - // arm:`SLL\t[$]5`,`ADD\sR[0-9]+<<1,\sR[0-9]+`,-`MUL` - // loong64:"SLLV\t[$]5","ALSLV\t[$]1," - // s390x:`SLD\t[$]5`,`SLD\t[$]6`,-`MULLD` + // amd64:`SHLQ [$]5`,`LEAQ \(.*\)\(.*\*2\),`,-`IMULQ` + // 386:`SHLL [$]5`,`LEAL \(.*\)\(.*\*2\),`,-`IMULL` + // arm64:`LSL [$]5`,`ADD\sR[0-9]+<<1,\sR[0-9]+`,-`MUL` + // arm:`SLL [$]5`,`ADD\sR[0-9]+<<1,\sR[0-9]+`,-`MUL` + // loong64:"SLLV [$]5" "ALSLV [$]1," + // s390x:`SLD [$]5`,`SLD [$]6`,-`MULLD` return n * 96 } func Mul_n120(n int) int { - // loong64:"SLLV\t[$]3","SLLV\t[$]7","SUBVU",-"MULV" - // s390x:`SLD\t[$]3`,`SLD\t[$]7`,-`MULLD` + // loong64:"SLLV [$]3" "SLLV [$]7" "SUBVU" -"MULV" + // s390x:`SLD [$]3`,`SLD [$]7`,-`MULLD` return n * -120 } @@ -284,50 +280,50 @@ func MulMemSrc(a []uint32, b []float32) { // Multiplications merging tests func MergeMuls1(n int) int { - // amd64:"IMUL3Q\t[$]46" - // 386:"IMUL3L\t[$]46" - // ppc64x:"MULLD\t[$]46" + // amd64:"IMUL3Q [$]46" + // 386:"IMUL3L [$]46" + // ppc64x:"MULLD [$]46" return 15*n + 31*n // 46n } func MergeMuls2(n int) int { - // amd64:"IMUL3Q\t[$]23","(ADDQ\t[$]29)|(LEAQ\t29)" - // 386:"IMUL3L\t[$]23","ADDL\t[$]29" - // ppc64x/power9:"MADDLD",-"MULLD\t[$]23",-"ADD\t[$]29" - // ppc64x/power8:"MULLD\t[$]23","ADD\t[$]29" + // amd64:"IMUL3Q [$]23" "(ADDQ [$]29)|(LEAQ 29)" + // 386:"IMUL3L [$]23" "ADDL [$]29" + // ppc64x/power9:"MADDLD" -"MULLD [$]23" -"ADD [$]29" + // ppc64x/power8:"MULLD [$]23" "ADD [$]29" return 5*n + 7*(n+1) + 11*(n+2) // 23n + 29 } func MergeMuls3(a, n int) int { - // amd64:"ADDQ\t[$]19",-"IMULQ\t[$]19" - // 386:"ADDL\t[$]19",-"IMULL\t[$]19" - // ppc64x:"ADD\t[$]19",-"MULLD\t[$]19" + // amd64:"ADDQ [$]19" -"IMULQ 
[$]19" + // 386:"ADDL [$]19" -"IMULL [$]19" + // ppc64x:"ADD [$]19" -"MULLD [$]19" return a*n + 19*n // (a+19)n } func MergeMuls4(n int) int { - // amd64:"IMUL3Q\t[$]14" - // 386:"IMUL3L\t[$]14" - // ppc64x:"MULLD\t[$]14" + // amd64:"IMUL3Q [$]14" + // 386:"IMUL3L [$]14" + // ppc64x:"MULLD [$]14" return 23*n - 9*n // 14n } func MergeMuls5(a, n int) int { - // amd64:"ADDQ\t[$]-19",-"IMULQ\t[$]19" - // 386:"ADDL\t[$]-19",-"IMULL\t[$]19" - // ppc64x:"ADD\t[$]-19",-"MULLD\t[$]19" + // amd64:"ADDQ [$]-19" -"IMULQ [$]19" + // 386:"ADDL [$]-19" -"IMULL [$]19" + // ppc64x:"ADD [$]-19" -"MULLD [$]19" return a*n - 19*n // (a-19)n } // Multiplications folded negation func FoldNegMul(a int) int { - // loong64:"SUBVU","ALSLV\t[$]2","ALSLV\t[$]1" + // loong64:"SUBVU" "ALSLV [$]2" "ALSLV [$]1" return (-a) * 11 } func Fold2NegMul(a, b int) int { - // loong64:"MULV",-"SUBVU\tR[0-9], R0," + // loong64:"MULV" -"SUBVU R[0-9], R0," return (-a) * (-b) } @@ -342,17 +338,17 @@ func DivMemSrc(a []float64) { } func Pow2Divs(n1 uint, n2 int) (uint, int) { - // 386:"SHRL\t[$]5",-"DIVL" - // amd64:"SHRQ\t[$]5",-"DIVQ" - // arm:"SRL\t[$]5",-".*udiv" - // arm64:"LSR\t[$]5",-"UDIV" + // 386:"SHRL [$]5" -"DIVL" + // amd64:"SHRQ [$]5" -"DIVQ" + // arm:"SRL [$]5" -".*udiv" + // arm64:"LSR [$]5" -"UDIV" // ppc64x:"SRD" a := n1 / 32 // unsigned - // amd64:"SARQ\t[$]6",-"IDIVQ" - // 386:"SARL\t[$]6",-"IDIVL" - // arm:"SRA\t[$]6",-".*udiv" - // arm64:"ASR\t[$]6",-"SDIV" + // amd64:"SARQ [$]6" -"IDIVQ" + // 386:"SARL [$]6" -"IDIVL" + // arm:"SRA [$]6" -".*udiv" + // arm64:"ASR [$]6" -"SDIV" // ppc64x:"SRAD" b := n2 / 64 // signed @@ -361,16 +357,16 @@ func Pow2Divs(n1 uint, n2 int) (uint, int) { // Check that constant divisions get turned into MULs func ConstDivs(n1 uint, n2 int) (uint, int) { - // amd64:"MOVQ\t[$]-1085102592571150095","MULQ",-"DIVQ" - // 386:"MOVL\t[$]-252645135","MULL",-"DIVL" - // arm64:`MOVD`,`UMULH`,-`DIV` - // arm:`MOVW`,`MUL`,-`.*udiv` + // amd64: "MOVQ [$]-1085102592571150095" "MULQ" -"DIVQ" + // 386: "MOVL [$]-252645135" "MULL" -"DIVL" + // arm64: `MOVD`,`UMULH`,-`DIV` + // arm: `MOVW`,`MUL`,-`.*udiv` a := n1 / 17 // unsigned - // amd64:"MOVQ\t[$]-1085102592571150095","IMULQ",-"IDIVQ" - // 386:"MOVL\t[$]-252645135","IMULL",-"IDIVL" - // arm64:`SMULH`,-`DIV` - // arm:`MOVW`,`MUL`,-`.*udiv` + // amd64: "MOVQ [$]-1085102592571150095" "IMULQ" -"IDIVQ" + // 386: "IMULL" "SARL [$]4," "SARL [$]31," "SUBL" -".*DIV" + // arm64: `SMULH` -`DIV` + // arm: `MOVW` `MUL` -`.*udiv` b := n2 / 17 // signed return a, b @@ -383,17 +379,17 @@ func FloatDivs(a []float32) float32 { } func Pow2Mods(n1 uint, n2 int) (uint, int) { - // 386:"ANDL\t[$]31",-"DIVL" - // amd64:"ANDL\t[$]31",-"DIVQ" - // arm:"AND\t[$]31",-".*udiv" - // arm64:"AND\t[$]31",-"UDIV" + // 386:"ANDL [$]31" -"DIVL" + // amd64:"ANDL [$]31" -"DIVQ" + // arm:"AND [$]31" -".*udiv" + // arm64:"AND [$]31" -"UDIV" // ppc64x:"RLDICL" a := n1 % 32 // unsigned - // 386:"SHRL",-"IDIVL" - // amd64:"SHRQ",-"IDIVQ" - // arm:"SRA",-".*udiv" - // arm64:"ASR",-"REM" + // 386:"SHRL" -"IDIVL" + // amd64:"SHRQ" -"IDIVQ" + // arm:"SRA" -".*udiv" + // arm64:"ASR" -"REM" // ppc64x:"SRAD" b := n2 % 64 // signed @@ -402,18 +398,18 @@ func Pow2Mods(n1 uint, n2 int) (uint, int) { // Check that signed divisibility checks get converted to AND on low bits func Pow2DivisibleSigned(n1, n2 int) (bool, bool) { - // 386:"TESTL\t[$]63",-"DIVL",-"SHRL" - // amd64:"TESTQ\t[$]63",-"DIVQ",-"SHRQ" - // arm:"AND\t[$]63",-".*udiv",-"SRA" - // arm64:"TST\t[$]63",-"UDIV",-"ASR",-"AND" - // 
ppc64x:"ANDCC",-"RLDICL",-"SRAD",-"CMP" + // 386:"TESTL [$]63" -"DIVL" -"SHRL" + // amd64:"TESTQ [$]63" -"DIVQ" -"SHRQ" + // arm:"AND [$]63" -".*udiv" -"SRA" + // arm64:"TST [$]63" -"UDIV" -"ASR" -"AND" + // ppc64x:"ANDCC" -"RLDICL" -"SRAD" -"CMP" a := n1%64 == 0 // signed divisible - // 386:"TESTL\t[$]63",-"DIVL",-"SHRL" - // amd64:"TESTQ\t[$]63",-"DIVQ",-"SHRQ" - // arm:"AND\t[$]63",-".*udiv",-"SRA" - // arm64:"TST\t[$]63",-"UDIV",-"ASR",-"AND" - // ppc64x:"ANDCC",-"RLDICL",-"SRAD",-"CMP" + // 386:"TESTL [$]63" -"DIVL" -"SHRL" + // amd64:"TESTQ [$]63" -"DIVQ" -"SHRQ" + // arm:"AND [$]63" -".*udiv" -"SRA" + // arm64:"TST [$]63" -"UDIV" -"ASR" -"AND" + // ppc64x:"ANDCC" -"RLDICL" -"SRAD" -"CMP" b := n2%64 != 0 // signed indivisible return a, b @@ -421,16 +417,16 @@ func Pow2DivisibleSigned(n1, n2 int) (bool, bool) { // Check that constant modulo divs get turned into MULs func ConstMods(n1 uint, n2 int) (uint, int) { - // amd64:"MOVQ\t[$]-1085102592571150095","MULQ",-"DIVQ" - // 386:"MOVL\t[$]-252645135","MULL",-"DIVL" - // arm64:`MOVD`,`UMULH`,-`DIV` - // arm:`MOVW`,`MUL`,-`.*udiv` + // amd64: "MOVQ [$]-1085102592571150095" "MULQ" -"DIVQ" + // 386: "MOVL [$]-252645135" "MULL" -".*DIVL" + // arm64: `MOVD` `UMULH` -`DIV` + // arm: `MOVW` `MUL` -`.*udiv` a := n1 % 17 // unsigned - // amd64:"MOVQ\t[$]-1085102592571150095","IMULQ",-"IDIVQ" - // 386:"MOVL\t[$]-252645135","IMULL",-"IDIVL" - // arm64:`SMULH`,-`DIV` - // arm:`MOVW`,`MUL`,-`.*udiv` + // amd64: "MOVQ [$]-1085102592571150095" "IMULQ" -"IDIVQ" + // 386: "IMULL" "SARL [$]4," "SARL [$]31," "SUBL" "SHLL [$]4," "SUBL" -".*DIV" + // arm64: `SMULH` -`DIV` + // arm: `MOVW` `MUL` -`.*udiv` b := n2 % 17 // signed return a, b @@ -438,38 +434,38 @@ func ConstMods(n1 uint, n2 int) (uint, int) { // Check that divisibility checks x%c==0 are converted to MULs and rotates func DivisibleU(n uint) (bool, bool) { - // amd64:"MOVQ\t[$]-6148914691236517205","IMULQ","ROLQ\t[$]63",-"DIVQ" - // 386:"IMUL3L\t[$]-1431655765","ROLL\t[$]31",-"DIVQ" - // arm64:"MOVD\t[$]-6148914691236517205","MOVD\t[$]3074457345618258602","MUL","ROR",-"DIV" - // arm:"MUL","CMP\t[$]715827882",-".*udiv" - // ppc64x:"MULLD","ROTL\t[$]63" + // amd64:"MOVQ [$]-6148914691236517205" "IMULQ" "ROLQ [$]63" -"DIVQ" + // 386:"IMUL3L [$]-1431655765" "ROLL [$]31" -"DIVQ" + // arm64:"MOVD [$]-6148914691236517205" "MOVD [$]3074457345618258602" "MUL" "ROR" -"DIV" + // arm:"MUL" "CMP [$]715827882" -".*udiv" + // ppc64x:"MULLD" "ROTL [$]63" even := n%6 == 0 - // amd64:"MOVQ\t[$]-8737931403336103397","IMULQ",-"ROLQ",-"DIVQ" - // 386:"IMUL3L\t[$]678152731",-"ROLL",-"DIVQ" - // arm64:"MOVD\t[$]-8737931403336103397","MUL",-"ROR",-"DIV" - // arm:"MUL","CMP\t[$]226050910",-".*udiv" - // ppc64x:"MULLD",-"ROTL" + // amd64:"MOVQ [$]-8737931403336103397" "IMULQ" -"ROLQ" -"DIVQ" + // 386:"IMUL3L [$]678152731" -"ROLL" -"DIVQ" + // arm64:"MOVD [$]-8737931403336103397" "MUL" -"ROR" -"DIV" + // arm:"MUL" "CMP [$]226050910" -".*udiv" + // ppc64x:"MULLD" -"ROTL" odd := n%19 == 0 return even, odd } func Divisible(n int) (bool, bool) { - // amd64:"IMULQ","ADD","ROLQ\t[$]63",-"DIVQ" - // 386:"IMUL3L\t[$]-1431655765","ADDL\t[$]715827882","ROLL\t[$]31",-"DIVQ" - // arm64:"MOVD\t[$]-6148914691236517205","MOVD\t[$]3074457345618258602","MUL","ADD\tR","ROR",-"DIV" - // arm:"MUL","ADD\t[$]715827882",-".*udiv" - // ppc64x/power8:"MULLD","ADD","ROTL\t[$]63" - // ppc64x/power9:"MADDLD","ROTL\t[$]63" + // amd64:"IMULQ" "ADD" "ROLQ [$]63" -"DIVQ" + // 386:"IMUL3L [$]-1431655765" "ADDL [$]715827882" "ROLL [$]31" -"DIVQ" + // 
arm64:"MOVD [$]-6148914691236517205" "MOVD [$]3074457345618258602" "MUL" "ADD R" "ROR" -"DIV" + // arm:"MUL" "ADD [$]715827882" -".*udiv" + // ppc64x/power8:"MULLD" "ADD" "ROTL [$]63" + // ppc64x/power9:"MADDLD" "ROTL [$]63" even := n%6 == 0 - // amd64:"IMULQ","ADD",-"ROLQ",-"DIVQ" - // 386:"IMUL3L\t[$]678152731","ADDL\t[$]113025455",-"ROLL",-"DIVQ" - // arm64:"MUL","MOVD\t[$]485440633518672410","ADD",-"ROR",-"DIV" - // arm:"MUL","ADD\t[$]113025455",-".*udiv" - // ppc64x/power8:"MULLD","ADD",-"ROTL" - // ppc64x/power9:"MADDLD",-"ROTL" + // amd64:"IMULQ" "ADD" -"ROLQ" -"DIVQ" + // 386:"IMUL3L [$]678152731" "ADDL [$]113025455" -"ROLL" -"DIVQ" + // arm64:"MUL" "MOVD [$]485440633518672410" "ADD" -"ROR" -"DIV" + // arm:"MUL" "ADD [$]113025455" -".*udiv" + // ppc64x/power8:"MULLD" "ADD" -"ROTL" + // ppc64x/power9:"MADDLD" -"ROTL" odd := n%19 == 0 return even, odd @@ -568,64 +564,64 @@ func NoFix16B(divd int16) (int16, int16) { // optimized into shifts and ands func LenDiv1(a []int) int { - // 386:"SHRL\t[$]10" - // amd64:"SHRQ\t[$]10" - // arm64:"LSR\t[$]10",-"SDIV" - // arm:"SRL\t[$]10",-".*udiv" - // ppc64x:"SRD"\t[$]10" + // 386:"SHRL [$]10" + // amd64:"SHRQ [$]10" + // arm64:"LSR [$]10" -"SDIV" + // arm:"SRL [$]10" -".*udiv" + // ppc64x:"SRD" [$]10" return len(a) / 1024 } func LenDiv2(s string) int { - // 386:"SHRL\t[$]11" - // amd64:"SHRQ\t[$]11" - // arm64:"LSR\t[$]11",-"SDIV" - // arm:"SRL\t[$]11",-".*udiv" - // ppc64x:"SRD\t[$]11" + // 386:"SHRL [$]11" + // amd64:"SHRQ [$]11" + // arm64:"LSR [$]11" -"SDIV" + // arm:"SRL [$]11" -".*udiv" + // ppc64x:"SRD [$]11" return len(s) / (4097 >> 1) } func LenMod1(a []int) int { - // 386:"ANDL\t[$]1023" - // amd64:"ANDL\t[$]1023" - // arm64:"AND\t[$]1023",-"SDIV" - // arm/6:"AND",-".*udiv" - // arm/7:"BFC",-".*udiv",-"AND" + // 386:"ANDL [$]1023" + // amd64:"ANDL [$]1023" + // arm64:"AND [$]1023" -"SDIV" + // arm/6:"AND" -".*udiv" + // arm/7:"BFC" -".*udiv" -"AND" // ppc64x:"RLDICL" return len(a) % 1024 } func LenMod2(s string) int { - // 386:"ANDL\t[$]2047" - // amd64:"ANDL\t[$]2047" - // arm64:"AND\t[$]2047",-"SDIV" - // arm/6:"AND",-".*udiv" - // arm/7:"BFC",-".*udiv",-"AND" + // 386:"ANDL [$]2047" + // amd64:"ANDL [$]2047" + // arm64:"AND [$]2047" -"SDIV" + // arm/6:"AND" -".*udiv" + // arm/7:"BFC" -".*udiv" -"AND" // ppc64x:"RLDICL" return len(s) % (4097 >> 1) } func CapDiv(a []int) int { - // 386:"SHRL\t[$]12" - // amd64:"SHRQ\t[$]12" - // arm64:"LSR\t[$]12",-"SDIV" - // arm:"SRL\t[$]12",-".*udiv" - // ppc64x:"SRD\t[$]12" + // 386:"SHRL [$]12" + // amd64:"SHRQ [$]12" + // arm64:"LSR [$]12" -"SDIV" + // arm:"SRL [$]12" -".*udiv" + // ppc64x:"SRD [$]12" return cap(a) / ((1 << 11) + 2048) } func CapMod(a []int) int { - // 386:"ANDL\t[$]4095" - // amd64:"ANDL\t[$]4095" - // arm64:"AND\t[$]4095",-"SDIV" - // arm/6:"AND",-".*udiv" - // arm/7:"BFC",-".*udiv",-"AND" + // 386:"ANDL [$]4095" + // amd64:"ANDL [$]4095" + // arm64:"AND [$]4095" -"SDIV" + // arm/6:"AND" -".*udiv" + // arm/7:"BFC" -".*udiv" -"AND" // ppc64x:"RLDICL" return cap(a) % ((1 << 11) + 2048) } func AddMul(x int) int { - // amd64:"LEAQ\t1" + // amd64:"LEAQ 1" return 2*x + 1 } @@ -675,29 +671,30 @@ func addSpecial(a, b, c uint32) (uint32, uint32, uint32) { } // Divide -> shift rules usually require fixup for negative inputs. -// If the input is non-negative, make sure the fixup is eliminated. +// If the input is non-negative, make sure the unsigned form is generated. 
func divInt(v int64) int64 { if v < 0 { - return 0 + // amd64:`SARQ.*63,`, `SHRQ.*56,`, `SARQ.*8,` + return v / 256 } - // amd64:-`.*SARQ.*63,`, -".*SHRQ", ".*SARQ.*[$]9," + // amd64:-`.*SARQ`, `SHRQ.*9,` return v / 512 } // The reassociate rules "x - (z + C) -> (x - z) - C" and // "(z + C) -x -> C + (z - x)" can optimize the following cases. func constantFold1(i0, j0, i1, j1, i2, j2, i3, j3 int) (int, int, int, int) { - // arm64:"SUB","ADD\t[$]2" - // ppc64x:"SUB","ADD\t[$]2" + // arm64:"SUB" "ADD [$]2" + // ppc64x:"SUB" "ADD [$]2" r0 := (i0 + 3) - (j0 + 1) - // arm64:"SUB","SUB\t[$]4" - // ppc64x:"SUB","ADD\t[$]-4" + // arm64:"SUB" "SUB [$]4" + // ppc64x:"SUB" "ADD [$]-4" r1 := (i1 - 3) - (j1 + 1) - // arm64:"SUB","ADD\t[$]4" - // ppc64x:"SUB","ADD\t[$]4" + // arm64:"SUB" "ADD [$]4" + // ppc64x:"SUB" "ADD [$]4" r2 := (i2 + 3) - (j2 - 1) - // arm64:"SUB","SUB\t[$]2" - // ppc64x:"SUB","ADD\t[$]-2" + // arm64:"SUB" "SUB [$]2" + // ppc64x:"SUB" "ADD [$]-2" r3 := (i3 - 3) - (j3 - 1) return r0, r1, r2, r3 } @@ -705,53 +702,51 @@ func constantFold1(i0, j0, i1, j1, i2, j2, i3, j3 int) (int, int, int, int) { // The reassociate rules "x - (z + C) -> (x - z) - C" and // "(C - z) - x -> C - (z + x)" can optimize the following cases. func constantFold2(i0, j0, i1, j1 int) (int, int) { - // arm64:"ADD","MOVD\t[$]2","SUB" - // ppc64x: `SUBC\tR[0-9]+,\s[$]2,\sR` + // arm64:"ADD" "MOVD [$]2" "SUB" + // ppc64x: `SUBC R[0-9]+,\s[$]2,\sR` r0 := (3 - i0) - (j0 + 1) - // arm64:"ADD","MOVD\t[$]4","SUB" - // ppc64x: `SUBC\tR[0-9]+,\s[$]4,\sR` + // arm64:"ADD" "MOVD [$]4" "SUB" + // ppc64x: `SUBC R[0-9]+,\s[$]4,\sR` r1 := (3 - i1) - (j1 - 1) return r0, r1 } func constantFold3(i, j int) int { - // arm64: "LSL\t[$]5,","SUB\tR[0-9]+<<1,",-"ADD" - // ppc64x:"MULLD\t[$]30","MULLD" + // arm64: "LSL [$]5," "SUB R[0-9]+<<1," -"ADD" + // ppc64x:"MULLD [$]30" "MULLD" r := (5 * i) * (6 * j) return r } -// ----------------- // -// Integer Min/Max // -// ----------------- // +// Integer Min/Max func Int64Min(a, b int64) int64 { - // amd64: "CMPQ","CMOVQLT" - // arm64: "CMP","CSEL" - // riscv64/rva20u64:"BLT\t" - // riscv64/rva22u64,riscv64/rva23u64:"MIN\t" + // amd64: "CMPQ" "CMOVQLT" + // arm64: "CMP" "CSEL" + // riscv64/rva20u64:"BLT " + // riscv64/rva22u64,riscv64/rva23u64:"MIN " return min(a, b) } func Int64Max(a, b int64) int64 { - // amd64: "CMPQ","CMOVQGT" - // arm64: "CMP","CSEL" - // riscv64/rva20u64:"BLT\t" - // riscv64/rva22u64,riscv64/rva23u64:"MAX\t" + // amd64: "CMPQ" "CMOVQGT" + // arm64: "CMP" "CSEL" + // riscv64/rva20u64:"BLT " + // riscv64/rva22u64,riscv64/rva23u64:"MAX " return max(a, b) } func Uint64Min(a, b uint64) uint64 { - // amd64: "CMPQ","CMOVQCS" - // arm64: "CMP","CSEL" + // amd64: "CMPQ" "CMOVQCS" + // arm64: "CMP" "CSEL" // riscv64/rva20u64:"BLTU" // riscv64/rva22u64,riscv64/rva23u64:"MINU" return min(a, b) } func Uint64Max(a, b uint64) uint64 { - // amd64: "CMPQ","CMOVQHI" - // arm64: "CMP","CSEL" + // amd64: "CMPQ" "CMOVQHI" + // arm64: "CMP" "CSEL" // riscv64/rva20u64:"BLTU" // riscv64/rva22u64,riscv64/rva23u64:"MAXU" return max(a, b) diff --git a/test/codegen/atomics.go b/test/codegen/atomics.go index 14024dcd836..7ae5a9aff71 100644 --- a/test/codegen/atomics.go +++ b/test/codegen/atomics.go @@ -22,7 +22,7 @@ func (c *Counter) Increment() { // arm64/v8.1:"LDADDALW" // arm64/v8.0:".*arm64HasATOMICS" // arm64/v8.1:-".*arm64HasATOMICS" - // amd64:"LOCK",-"CMPXCHG" + // amd64:"LOCK" -"CMPXCHG" atomic.AddInt32(&c.count, 1) } @@ -34,13 +34,13 @@ func atomicLogical64(x *atomic.Uint64) uint64 { // 
arm64/v8.0:".*arm64HasATOMICS" // arm64/v8.1:-".*arm64HasATOMICS" // On amd64, make sure we use LOCK+AND instead of CMPXCHG when we don't use the result. - // amd64:"LOCK",-"CMPXCHGQ" + // amd64:"LOCK" -"CMPXCHGQ" x.And(11) // arm64/v8.0:"LDCLRALD" // arm64/v8.1:"LDCLRALD" // arm64/v8.0:".*arm64HasATOMICS" // arm64/v8.1:-".*arm64HasATOMICS" - // amd64:"LOCK","CMPXCHGQ" + // amd64:"LOCK" "CMPXCHGQ" r += x.And(22) // arm64/v8.0:"LDORALD" @@ -48,13 +48,13 @@ func atomicLogical64(x *atomic.Uint64) uint64 { // arm64/v8.0:".*arm64HasATOMICS" // arm64/v8.1:-".*arm64HasATOMICS" // On amd64, make sure we use LOCK+OR instead of CMPXCHG when we don't use the result. - // amd64:"LOCK",-"CMPXCHGQ" + // amd64:"LOCK" -"CMPXCHGQ" x.Or(33) // arm64/v8.0:"LDORALD" // arm64/v8.1:"LDORALD" // arm64/v8.0:".*arm64HasATOMICS" // arm64/v8.1:-".*arm64HasATOMICS" - // amd64:"LOCK","CMPXCHGQ" + // amd64:"LOCK" "CMPXCHGQ" r += x.Or(44) return r @@ -68,13 +68,13 @@ func atomicLogical32(x *atomic.Uint32) uint32 { // arm64/v8.0:".*arm64HasATOMICS" // arm64/v8.1:-".*arm64HasATOMICS" // On amd64, make sure we use LOCK+AND instead of CMPXCHG when we don't use the result. - // amd64:"LOCK",-"CMPXCHGL" + // amd64:"LOCK" -"CMPXCHGL" x.And(11) // arm64/v8.0:"LDCLRALW" // arm64/v8.1:"LDCLRALW" // arm64/v8.0:".*arm64HasATOMICS" // arm64/v8.1:-".*arm64HasATOMICS" - // amd64:"LOCK","CMPXCHGL" + // amd64:"LOCK" "CMPXCHGL" r += x.And(22) // arm64/v8.0:"LDORALW" @@ -82,13 +82,13 @@ func atomicLogical32(x *atomic.Uint32) uint32 { // arm64/v8.0:".*arm64HasATOMICS" // arm64/v8.1:-".*arm64HasATOMICS" // On amd64, make sure we use LOCK+OR instead of CMPXCHG when we don't use the result. - // amd64:"LOCK",-"CMPXCHGL" + // amd64:"LOCK" -"CMPXCHGL" x.Or(33) // arm64/v8.0:"LDORALW" // arm64/v8.1:"LDORALW" // arm64/v8.0:".*arm64HasATOMICS" // arm64/v8.1:-".*arm64HasATOMICS" - // amd64:"LOCK","CMPXCHGL" + // amd64:"LOCK" "CMPXCHGL" r += x.Or(44) return r diff --git a/test/codegen/bitfield.go b/test/codegen/bitfield.go index 51221266e10..25393627504 100644 --- a/test/codegen/bitfield.go +++ b/test/codegen/bitfield.go @@ -10,304 +10,304 @@ package codegen // insertion/extraction simplifications/optimizations. 
func extr1(x, x2 uint64) uint64 { - return x<<7 + x2>>57 // arm64:"EXTR\t[$]57," + return x<<7 + x2>>57 // arm64:"EXTR [$]57," } func extr2(x, x2 uint64) uint64 { - return x<<7 | x2>>57 // arm64:"EXTR\t[$]57," + return x<<7 | x2>>57 // arm64:"EXTR [$]57," } func extr3(x, x2 uint64) uint64 { - return x<<7 ^ x2>>57 // arm64:"EXTR\t[$]57," + return x<<7 ^ x2>>57 // arm64:"EXTR [$]57," } func extr4(x, x2 uint32) uint32 { - return x<<7 + x2>>25 // arm64:"EXTRW\t[$]25," + return x<<7 + x2>>25 // arm64:"EXTRW [$]25," } func extr5(x, x2 uint32) uint32 { - return x<<7 | x2>>25 // arm64:"EXTRW\t[$]25," + return x<<7 | x2>>25 // arm64:"EXTRW [$]25," } func extr6(x, x2 uint32) uint32 { - return x<<7 ^ x2>>25 // arm64:"EXTRW\t[$]25," + return x<<7 ^ x2>>25 // arm64:"EXTRW [$]25," } // check 32-bit shift masking func mask32(x uint32) uint32 { - return (x << 29) >> 29 // arm64:"AND\t[$]7, R[0-9]+",-"LSR",-"LSL" + return (x << 29) >> 29 // arm64:"AND [$]7, R[0-9]+" -"LSR" -"LSL" } // check 16-bit shift masking func mask16(x uint16) uint16 { - return (x << 14) >> 14 // arm64:"AND\t[$]3, R[0-9]+",-"LSR",-"LSL" + return (x << 14) >> 14 // arm64:"AND [$]3, R[0-9]+" -"LSR" -"LSL" } // check 8-bit shift masking func mask8(x uint8) uint8 { - return (x << 7) >> 7 // arm64:"AND\t[$]1, R[0-9]+",-"LSR",-"LSL" + return (x << 7) >> 7 // arm64:"AND [$]1, R[0-9]+" -"LSR" -"LSL" } func maskshift(x uint64) uint64 { - // arm64:"AND\t[$]4095, R[0-9]+",-"LSL",-"LSR",-"UBFIZ",-"UBFX" + // arm64:"AND [$]4095, R[0-9]+" -"LSL" -"LSR" -"UBFIZ" -"UBFX" return ((x << 5) & (0xfff << 5)) >> 5 } // bitfield ops // bfi func bfi1(x, y uint64) uint64 { - // arm64:"BFI\t[$]4, R[0-9]+, [$]12",-"LSL",-"LSR",-"AND" + // arm64:"BFI [$]4, R[0-9]+, [$]12" -"LSL" -"LSR" -"AND" return ((x & 0xfff) << 4) | (y & 0xffffffffffff000f) } func bfi2(x, y uint64) uint64 { - // arm64:"BFI\t[$]12, R[0-9]+, [$]40",-"LSL",-"LSR",-"AND" + // arm64:"BFI [$]12, R[0-9]+, [$]40" -"LSL" -"LSR" -"AND" return (x << 24 >> 12) | (y & 0xfff0000000000fff) } // bfxil func bfxil1(x, y uint64) uint64 { - // arm64:"BFXIL\t[$]5, R[0-9]+, [$]12",-"LSL",-"LSR",-"AND" + // arm64:"BFXIL [$]5, R[0-9]+, [$]12" -"LSL" -"LSR" -"AND" return ((x >> 5) & 0xfff) | (y & 0xfffffffffffff000) } func bfxil2(x, y uint64) uint64 { - // arm64:"BFXIL\t[$]12, R[0-9]+, [$]40",-"LSL",-"LSR",-"AND" + // arm64:"BFXIL [$]12, R[0-9]+, [$]40" -"LSL" -"LSR" -"AND" return (x << 12 >> 24) | (y & 0xffffff0000000000) } // sbfiz // merge shifts into sbfiz: (x << lc) >> rc && lc > rc. func sbfiz1(x int64) int64 { - // arm64:"SBFIZ\t[$]1, R[0-9]+, [$]60",-"LSL",-"ASR" + // arm64:"SBFIZ [$]1, R[0-9]+, [$]60" -"LSL" -"ASR" return (x << 4) >> 3 } // merge shift and sign-extension into sbfiz. func sbfiz2(x int32) int64 { - return int64(x << 3) // arm64:"SBFIZ\t[$]3, R[0-9]+, [$]29",-"LSL" + return int64(x << 3) // arm64:"SBFIZ [$]3, R[0-9]+, [$]29" -"LSL" } func sbfiz3(x int16) int64 { - return int64(x << 3) // arm64:"SBFIZ\t[$]3, R[0-9]+, [$]13",-"LSL" + return int64(x << 3) // arm64:"SBFIZ [$]3, R[0-9]+, [$]13" -"LSL" } func sbfiz4(x int8) int64 { - return int64(x << 3) // arm64:"SBFIZ\t[$]3, R[0-9]+, [$]5",-"LSL" + return int64(x << 3) // arm64:"SBFIZ [$]3, R[0-9]+, [$]5" -"LSL" } // sbfiz combinations. // merge shift with sbfiz into sbfiz. 
func sbfiz5(x int32) int32 { - // arm64:"SBFIZ\t[$]1, R[0-9]+, [$]28",-"LSL",-"ASR" + // arm64:"SBFIZ [$]1, R[0-9]+, [$]28" -"LSL" -"ASR" return (x << 4) >> 3 } func sbfiz6(x int16) int64 { - return int64(x+1) << 3 // arm64:"SBFIZ\t[$]3, R[0-9]+, [$]16",-"LSL" + return int64(x+1) << 3 // arm64:"SBFIZ [$]3, R[0-9]+, [$]16" -"LSL" } func sbfiz7(x int8) int64 { - return int64(x+1) << 62 // arm64:"SBFIZ\t[$]62, R[0-9]+, [$]2",-"LSL" + return int64(x+1) << 62 // arm64:"SBFIZ [$]62, R[0-9]+, [$]2" -"LSL" } func sbfiz8(x int32) int64 { - return int64(x+1) << 40 // arm64:"SBFIZ\t[$]40, R[0-9]+, [$]24",-"LSL" + return int64(x+1) << 40 // arm64:"SBFIZ [$]40, R[0-9]+, [$]24" -"LSL" } // sbfx // merge shifts into sbfx: (x << lc) >> rc && lc <= rc. func sbfx1(x int64) int64 { - return (x << 3) >> 4 // arm64:"SBFX\t[$]1, R[0-9]+, [$]60",-"LSL",-"ASR" + return (x << 3) >> 4 // arm64:"SBFX [$]1, R[0-9]+, [$]60" -"LSL" -"ASR" } func sbfx2(x int64) int64 { - return (x << 60) >> 60 // arm64:"SBFX\t[$]0, R[0-9]+, [$]4",-"LSL",-"ASR" + return (x << 60) >> 60 // arm64:"SBFX [$]0, R[0-9]+, [$]4" -"LSL" -"ASR" } // merge shift and sign-extension into sbfx. func sbfx3(x int32) int64 { - return int64(x) >> 3 // arm64:"SBFX\t[$]3, R[0-9]+, [$]29",-"ASR" + return int64(x) >> 3 // arm64:"SBFX [$]3, R[0-9]+, [$]29" -"ASR" } func sbfx4(x int16) int64 { - return int64(x) >> 3 // arm64:"SBFX\t[$]3, R[0-9]+, [$]13",-"ASR" + return int64(x) >> 3 // arm64:"SBFX [$]3, R[0-9]+, [$]13" -"ASR" } func sbfx5(x int8) int64 { - return int64(x) >> 3 // arm64:"SBFX\t[$]3, R[0-9]+, [$]5",-"ASR" + return int64(x) >> 3 // arm64:"SBFX [$]3, R[0-9]+, [$]5" -"ASR" } func sbfx6(x int32) int64 { - return int64(x >> 30) // arm64:"SBFX\t[$]30, R[0-9]+, [$]2" + return int64(x >> 30) // arm64:"SBFX [$]30, R[0-9]+, [$]2" } func sbfx7(x int16) int64 { - return int64(x >> 10) // arm64:"SBFX\t[$]10, R[0-9]+, [$]6" + return int64(x >> 10) // arm64:"SBFX [$]10, R[0-9]+, [$]6" } func sbfx8(x int8) int64 { - return int64(x >> 5) // arm64:"SBFX\t[$]5, R[0-9]+, [$]3" + return int64(x >> 5) // arm64:"SBFX [$]5, R[0-9]+, [$]3" } // sbfx combinations. // merge shifts with sbfiz into sbfx. func sbfx9(x int32) int32 { - return (x << 3) >> 4 // arm64:"SBFX\t[$]1, R[0-9]+, [$]28",-"LSL",-"ASR" + return (x << 3) >> 4 // arm64:"SBFX [$]1, R[0-9]+, [$]28" -"LSL" -"ASR" } // merge sbfx and sign-extension into sbfx. func sbfx10(x int32) int64 { c := x + 5 - return int64(c >> 20) // arm64"SBFX\t[$]20, R[0-9]+, [$]12",-"MOVW\tR[0-9]+, R[0-9]+" + return int64(c >> 20) // arm64"SBFX [$]20, R[0-9]+, [$]12" -"MOVW R[0-9]+, R[0-9]+" } // ubfiz // merge shifts into ubfiz: (x<<lc)>>rc && lc>rc func ubfiz1(x uint64) uint64 { - // arm64:"UBFIZ\t[$]1, R[0-9]+, [$]60",-"LSL",-"LSR" - // s390x:"RISBGZ\t[$]3, [$]62, [$]1, ",-"SLD",-"SRD" + // arm64:"UBFIZ [$]1, R[0-9]+, [$]60" -"LSL" -"LSR" + // s390x:"RISBGZ [$]3, [$]62, [$]1, " -"SLD" -"SRD" return (x << 4) >> 3 } // merge shift and zero-extension into ubfiz. 
func ubfiz2(x uint32) uint64 { - return uint64(x+1) << 3 // arm64:"UBFIZ\t[$]3, R[0-9]+, [$]32",-"LSL" + return uint64(x+1) << 3 // arm64:"UBFIZ [$]3, R[0-9]+, [$]32" -"LSL" } func ubfiz3(x uint16) uint64 { - return uint64(x+1) << 3 // arm64:"UBFIZ\t[$]3, R[0-9]+, [$]16",-"LSL" + return uint64(x+1) << 3 // arm64:"UBFIZ [$]3, R[0-9]+, [$]16" -"LSL" } func ubfiz4(x uint8) uint64 { - return uint64(x+1) << 3 // arm64:"UBFIZ\t[$]3, R[0-9]+, [$]8",-"LSL" + return uint64(x+1) << 3 // arm64:"UBFIZ [$]3, R[0-9]+, [$]8" -"LSL" } func ubfiz5(x uint8) uint64 { - return uint64(x) << 60 // arm64:"UBFIZ\t[$]60, R[0-9]+, [$]4",-"LSL" + return uint64(x) << 60 // arm64:"UBFIZ [$]60, R[0-9]+, [$]4" -"LSL" } func ubfiz6(x uint32) uint64 { - return uint64(x << 30) // arm64:"UBFIZ\t[$]30, R[0-9]+, [$]2", + return uint64(x << 30) // arm64:"UBFIZ [$]30, R[0-9]+, [$]2", } func ubfiz7(x uint16) uint64 { - return uint64(x << 10) // arm64:"UBFIZ\t[$]10, R[0-9]+, [$]6", + return uint64(x << 10) // arm64:"UBFIZ [$]10, R[0-9]+, [$]6", } func ubfiz8(x uint8) uint64 { - return uint64(x << 7) // arm64:"UBFIZ\t[$]7, R[0-9]+, [$]1", + return uint64(x << 7) // arm64:"UBFIZ [$]7, R[0-9]+, [$]1", } // merge ANDconst into ubfiz. func ubfiz9(x uint64) uint64 { - // arm64:"UBFIZ\t[$]3, R[0-9]+, [$]12",-"LSL",-"AND" - // s390x:"RISBGZ\t[$]49, [$]60, [$]3,",-"SLD",-"AND" + // arm64:"UBFIZ [$]3, R[0-9]+, [$]12" -"LSL" -"AND" + // s390x:"RISBGZ [$]49, [$]60, [$]3," -"SLD" -"AND" return (x & 0xfff) << 3 } func ubfiz10(x uint64) uint64 { - // arm64:"UBFIZ\t[$]4, R[0-9]+, [$]12",-"LSL",-"AND" - // s390x:"RISBGZ\t[$]48, [$]59, [$]4,",-"SLD",-"AND" + // arm64:"UBFIZ [$]4, R[0-9]+, [$]12" -"LSL" -"AND" + // s390x:"RISBGZ [$]48, [$]59, [$]4," -"SLD" -"AND" return (x << 4) & 0xfff0 } // ubfiz combinations func ubfiz11(x uint32) uint32 { - // arm64:"UBFIZ\t[$]1, R[0-9]+, [$]28",-"LSL",-"LSR" + // arm64:"UBFIZ [$]1, R[0-9]+, [$]28" -"LSL" -"LSR" return (x << 4) >> 3 } func ubfiz12(x uint64) uint64 { - // arm64:"UBFIZ\t[$]1, R[0-9]+, [$]20",-"LSL",-"LSR" - // s390x:"RISBGZ\t[$]43, [$]62, [$]1, ",-"SLD",-"SRD",-"AND" + // arm64:"UBFIZ [$]1, R[0-9]+, [$]20" -"LSL" -"LSR" + // s390x:"RISBGZ [$]43, [$]62, [$]1, " -"SLD" -"SRD" -"AND" return ((x & 0xfffff) << 4) >> 3 } func ubfiz13(x uint64) uint64 { - // arm64:"UBFIZ\t[$]5, R[0-9]+, [$]13",-"LSL",-"LSR",-"AND" + // arm64:"UBFIZ [$]5, R[0-9]+, [$]13" -"LSL" -"LSR" -"AND" return ((x << 3) & 0xffff) << 2 } func ubfiz14(x uint64) uint64 { - // arm64:"UBFIZ\t[$]7, R[0-9]+, [$]12",-"LSL",-"LSR",-"AND" - // s390x:"RISBGZ\t[$]45, [$]56, [$]7, ",-"SLD",-"SRD",-"AND" + // arm64:"UBFIZ [$]7, R[0-9]+, [$]12" -"LSL" -"LSR" -"AND" + // s390x:"RISBGZ [$]45, [$]56, [$]7, " -"SLD" -"SRD" -"AND" return ((x << 5) & (0xfff << 5)) << 2 } // ubfx // merge shifts into ubfx: (x<<lc)>>rc && lc<rc func ubfx1(x uint64) uint64 { - // arm64:"UBFX\t[$]1, R[0-9]+, [$]62",-"LSL",-"LSR" - // s390x:"RISBGZ\t[$]2, [$]63, [$]63,",-"SLD",-"SRD" + // arm64:"UBFX [$]1, R[0-9]+, [$]62" -"LSL" -"LSR" + // s390x:"RISBGZ [$]2, [$]63, [$]63," -"SLD" -"SRD" return (x << 1) >> 2 } // merge shift and zero-extension into ubfx.
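(ubfx runs the other direction: the field is extracted downward rather than inserted upward. Again a hypothetical sketch, not the compiler's definition:)

package bitfieldmodel

// ubfxModel mimics UBFX: extract `width` bits starting at bit `lsb`,
// zero-extended. Assumes width < 64.
func ubfxModel(x uint64, lsb, width uint) uint64 {
	return (x >> lsb) & (1<<width - 1)
}

ubfx2 below fits this shape: for x a uint32, uint64(x >> 15) equals ubfxModel(uint64(x), 15, 17), hence UBFX [$]15, R[0-9]+, [$]17.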
func ubfx2(x uint32) uint64 { - return uint64(x >> 15) // arm64:"UBFX\t[$]15, R[0-9]+, [$]17",-"LSR" + return uint64(x >> 15) // arm64:"UBFX [$]15, R[0-9]+, [$]17" -"LSR" } func ubfx3(x uint16) uint64 { - return uint64(x >> 9) // arm64:"UBFX\t[$]9, R[0-9]+, [$]7",-"LSR" + return uint64(x >> 9) // arm64:"UBFX [$]9, R[0-9]+, [$]7" -"LSR" } func ubfx4(x uint8) uint64 { - return uint64(x >> 3) // arm64:"UBFX\t[$]3, R[0-9]+, [$]5",-"LSR" + return uint64(x >> 3) // arm64:"UBFX [$]3, R[0-9]+, [$]5" -"LSR" } func ubfx5(x uint32) uint64 { - return uint64(x) >> 30 // arm64:"UBFX\t[$]30, R[0-9]+, [$]2" + return uint64(x) >> 30 // arm64:"UBFX [$]30, R[0-9]+, [$]2" } func ubfx6(x uint16) uint64 { - return uint64(x) >> 10 // arm64:"UBFX\t[$]10, R[0-9]+, [$]6" + return uint64(x) >> 10 // arm64:"UBFX [$]10, R[0-9]+, [$]6" } func ubfx7(x uint8) uint64 { - return uint64(x) >> 3 // arm64:"UBFX\t[$]3, R[0-9]+, [$]5" + return uint64(x) >> 3 // arm64:"UBFX [$]3, R[0-9]+, [$]5" } // merge ANDconst into ubfx. func ubfx8(x uint64) uint64 { - // arm64:"UBFX\t[$]25, R[0-9]+, [$]10",-"LSR",-"AND" - // s390x:"RISBGZ\t[$]54, [$]63, [$]39, ",-"SRD",-"AND" + // arm64:"UBFX [$]25, R[0-9]+, [$]10" -"LSR" -"AND" + // s390x:"RISBGZ [$]54, [$]63, [$]39, " -"SRD" -"AND" return (x >> 25) & 1023 } func ubfx9(x uint64) uint64 { - // arm64:"UBFX\t[$]4, R[0-9]+, [$]8",-"LSR",-"AND" - // s390x:"RISBGZ\t[$]56, [$]63, [$]60, ",-"SRD",-"AND" + // arm64:"UBFX [$]4, R[0-9]+, [$]8" -"LSR" -"AND" + // s390x:"RISBGZ [$]56, [$]63, [$]60, " -"SRD" -"AND" return (x & 0x0ff0) >> 4 } // ubfx combinations. func ubfx10(x uint32) uint32 { - // arm64:"UBFX\t[$]1, R[0-9]+, [$]30",-"LSL",-"LSR" + // arm64:"UBFX [$]1, R[0-9]+, [$]30" -"LSL" -"LSR" return (x << 1) >> 2 } func ubfx11(x uint64) uint64 { - // arm64:"UBFX\t[$]1, R[0-9]+, [$]12",-"LSL",-"LSR",-"AND" - // s390x:"RISBGZ\t[$]52, [$]63, [$]63,",-"SLD",-"SRD",-"AND" + // arm64:"UBFX [$]1, R[0-9]+, [$]12" -"LSL" -"LSR" -"AND" + // s390x:"RISBGZ [$]52, [$]63, [$]63," -"SLD" -"SRD" -"AND" return ((x << 1) >> 2) & 0xfff } func ubfx12(x uint64) uint64 { - // arm64:"UBFX\t[$]4, R[0-9]+, [$]11",-"LSL",-"LSR",-"AND" - // s390x:"RISBGZ\t[$]53, [$]63, [$]60, ",-"SLD",-"SRD",-"AND" + // arm64:"UBFX [$]4, R[0-9]+, [$]11" -"LSL" -"LSR" -"AND" + // s390x:"RISBGZ [$]53, [$]63, [$]60, " -"SLD" -"SRD" -"AND" return ((x >> 3) & 0xfff) >> 1 } func ubfx13(x uint64) uint64 { - // arm64:"UBFX\t[$]5, R[0-9]+, [$]56",-"LSL",-"LSR" - // s390x:"RISBGZ\t[$]8, [$]63, [$]59, ",-"SLD",-"SRD" + // arm64:"UBFX [$]5, R[0-9]+, [$]56" -"LSL" -"LSR" + // s390x:"RISBGZ [$]8, [$]63, [$]59, " -"SLD" -"SRD" return ((x >> 2) << 5) >> 8 } func ubfx14(x uint64) uint64 { - // arm64:"UBFX\t[$]1, R[0-9]+, [$]19",-"LSL",-"LSR" - // s390x:"RISBGZ\t[$]45, [$]63, [$]63, ",-"SLD",-"SRD",-"AND" + // arm64:"UBFX [$]1, R[0-9]+, [$]19" -"LSL" -"LSR" + // s390x:"RISBGZ [$]45, [$]63, [$]63, " -"SLD" -"SRD" -"AND" return ((x & 0xfffff) << 3) >> 4 } @@ -315,7 +315,7 @@ func ubfx14(x uint64) uint64 { func ubfx15(x uint64) bool { midr := x + 10 part_num := uint16((midr >> 4) & 0xfff) - if part_num == 0xd0c { // arm64:"UBFX\t[$]4, R[0-9]+, [$]12",-"MOVHU\tR[0-9]+, R[0-9]+" + if part_num == 0xd0c { // arm64:"UBFX [$]4, R[0-9]+, [$]12" -"MOVHU R[0-9]+, R[0-9]+" return true } return false @@ -323,7 +323,7 @@ func ubfx15(x uint64) bool { // merge ANDconst and ubfx into ubfx func ubfx16(x uint64) uint64 { - // arm64:"UBFX\t[$]4, R[0-9]+, [$]6",-"AND\t[$]63" + // arm64:"UBFX [$]4, R[0-9]+, [$]6" -"AND [$]63" return ((x >> 3) & 0xfff) >> 1 & 0x3f } @@ -331,48 
+331,48 @@ func ubfx16(x uint64) uint64 { // //go:nosplit func shift_no_cmp(x int) int { - // arm64:`LSL\t[$]17`,-`CMP` - // mips64:`SLLV\t[$]17`,-`SGT` + // arm64:`LSL [$]17`,-`CMP` + // mips64:`SLLV [$]17`,-`SGT` return x << 17 } func rev16(c uint64) (uint64, uint64, uint64) { - // arm64:`REV16`,-`AND`,-`LSR`,-`AND`,-`ORR\tR[0-9]+<<8` + // arm64:`REV16`,-`AND`,-`LSR`,-`AND`,-`ORR R[0-9]+<<8` // loong64:`REVB4H`,-`MOVV`,-`AND`,-`SRLV`,-`AND`,-`SLLV`,-`OR` b1 := ((c & 0xff00ff00ff00ff00) >> 8) | ((c & 0x00ff00ff00ff00ff) << 8) - // arm64:-`ADD\tR[0-9]+<<8` + // arm64:-`ADD R[0-9]+<<8` // loong64:-`ADDV` b2 := ((c & 0xff00ff00ff00ff00) >> 8) + ((c & 0x00ff00ff00ff00ff) << 8) - // arm64:-`EOR\tR[0-9]+<<8` + // arm64:-`EOR R[0-9]+<<8` // loong64:-`XOR` b3 := ((c & 0xff00ff00ff00ff00) >> 8) ^ ((c & 0x00ff00ff00ff00ff) << 8) return b1, b2, b3 } func rev16w(c uint32) (uint32, uint32, uint32) { - // arm64:`REV16W`,-`AND`,-`UBFX`,-`AND`,-`ORR\tR[0-9]+<<8` + // arm64:`REV16W`,-`AND`,-`UBFX`,-`AND`,-`ORR R[0-9]+<<8` // loong64:`REVB2H`,-`AND`,-`SRL`,-`AND`,-`SLL`,-`OR` b1 := ((c & 0xff00ff00) >> 8) | ((c & 0x00ff00ff) << 8) - // arm64:-`ADD\tR[0-9]+<<8` + // arm64:-`ADD R[0-9]+<<8` // loong64:-`ADDV` b2 := ((c & 0xff00ff00) >> 8) + ((c & 0x00ff00ff) << 8) - // arm64:-`EOR\tR[0-9]+<<8` + // arm64:-`EOR R[0-9]+<<8` // loong64:-`XOR` b3 := ((c & 0xff00ff00) >> 8) ^ ((c & 0x00ff00ff) << 8) return b1, b2, b3 } func shift(x uint32, y uint16, z uint8) uint64 { - // arm64:-`MOVWU`,-`LSR\t[$]32` - // loong64:-`MOVWU`,-`SRLV\t[$]32` + // arm64:-`MOVWU`,-`LSR [$]32` + // loong64:-`MOVWU`,-`SRLV [$]32` a := uint64(x) >> 32 // arm64:-`MOVHU - // loong64:-`MOVHU`,-`SRLV\t[$]16` + // loong64:-`MOVHU`,-`SRLV [$]16` b := uint64(y) >> 16 // arm64:-`MOVBU` - // loong64:-`MOVBU`,-`SRLV\t[$]8` + // loong64:-`MOVBU`,-`SRLV [$]8` c := uint64(z) >> 8 - // arm64:`MOVD\tZR`,-`ADD\tR[0-9]+>>16`,-`ADD\tR[0-9]+>>8`, - // loong64:`MOVV\tR0`,-`ADDVU` + // arm64:`MOVD ZR`,-`ADD R[0-9]+>>16`,-`ADD R[0-9]+>>8`, + // loong64:`MOVV R0`,-`ADDVU` return a + b + c } diff --git a/test/codegen/bits.go b/test/codegen/bits.go index 7974f471fcf..39969dcdb24 100644 --- a/test/codegen/bits.go +++ b/test/codegen/bits.go @@ -13,15 +13,15 @@ import "math/bits" ************************************/ func bitcheck64_constleft(a uint64) (n int) { - // amd64:"BTQ\t[$]63" + // amd64:"BTQ [$]63" if a&(1<<63) != 0 { return 1 } - // amd64:"BTQ\t[$]60" + // amd64:"BTQ [$]60" if a&(1<<60) != 0 { return 1 } - // amd64:"BTL\t[$]0" + // amd64:"BTL [$]0" if a&(1<<0) != 0 { return 1 } @@ -29,31 +29,31 @@ func bitcheck64_constleft(a uint64) (n int) { } func bitcheck64_constright(a [8]uint64) (n int) { - // amd64:"BTQ\t[$]63" + // amd64:"BTQ [$]63" if (a[0]>>63)&1 != 0 { return 1 } - // amd64:"BTQ\t[$]63" + // amd64:"BTQ [$]63" if a[1]>>63 != 0 { return 1 } - // amd64:"BTQ\t[$]63" + // amd64:"BTQ [$]63" if a[2]>>63 == 0 { return 1 } - // amd64:"BTQ\t[$]60" + // amd64:"BTQ [$]60" if (a[3]>>60)&1 == 0 { return 1 } - // amd64:"BTL\t[$]1" + // amd64:"BTL [$]1" if (a[4]>>1)&1 == 0 { return 1 } - // amd64:"BTL\t[$]0" + // amd64:"BTL [$]0" if (a[5]>>0)&1 == 0 { return 1 } - // amd64:"BTL\t[$]7" + // amd64:"BTL [$]7" if (a[6]>>5)&4 == 0 { return 1 } @@ -65,7 +65,7 @@ func bitcheck64_var(a, b uint64) (n int) { if a&(1<<(b&63)) != 0 { return 1 } - // amd64:"BTQ",-"BT.\t[$]0" + // amd64:"BTQ" -"BT. 
[$]0" if (b>>(a&63))&1 != 0 { return 1 } @@ -73,15 +73,15 @@ func bitcheck64_var(a, b uint64) (n int) { } func bitcheck64_mask(a uint64) (n int) { - // amd64:"BTQ\t[$]63" + // amd64:"BTQ [$]63" if a&0x8000000000000000 != 0 { return 1 } - // amd64:"BTQ\t[$]59" + // amd64:"BTQ [$]59" if a&0x800000000000000 != 0 { return 1 } - // amd64:"BTL\t[$]0" + // amd64:"BTL [$]0" if a&0x1 != 0 { return 1 } @@ -92,13 +92,13 @@ func biton64(a, b uint64) (n uint64) { // amd64:"BTSQ" n += b | (1 << (a & 63)) - // amd64:"BTSQ\t[$]63" + // amd64:"BTSQ [$]63" n += a | (1 << 63) - // amd64:"BTSQ\t[$]60" + // amd64:"BTSQ [$]60" n += a | (1 << 60) - // amd64:"ORQ\t[$]1" + // amd64:"ORQ [$]1" n += a | (1 << 0) return n @@ -108,23 +108,23 @@ func bitoff64(a, b uint64) (n uint64) { // amd64:"BTRQ" n += b &^ (1 << (a & 63)) - // amd64:"BTRQ\t[$]63" + // amd64:"BTRQ [$]63" n += a &^ (1 << 63) - // amd64:"BTRQ\t[$]60" + // amd64:"BTRQ [$]60" n += a &^ (1 << 60) - // amd64:"ANDQ\t[$]-2" + // amd64:"ANDQ [$]-2" n += a &^ (1 << 0) return n } func clearLastBit(x int64, y int32) (int64, int32) { - // amd64:"ANDQ\t[$]-2" + // amd64:"ANDQ [$]-2" a := (x >> 1) << 1 - // amd64:"ANDL\t[$]-2" + // amd64:"ANDL [$]-2" b := (y >> 1) << 1 return a, b @@ -134,13 +134,13 @@ func bitcompl64(a, b uint64) (n uint64) { // amd64:"BTCQ" n += b ^ (1 << (a & 63)) - // amd64:"BTCQ\t[$]63" + // amd64:"BTCQ [$]63" n += a ^ (1 << 63) - // amd64:"BTCQ\t[$]60" + // amd64:"BTCQ [$]60" n += a ^ (1 << 60) - // amd64:"XORQ\t[$]1" + // amd64:"XORQ [$]1" n += a ^ (1 << 0) return n @@ -151,15 +151,15 @@ func bitcompl64(a, b uint64) (n uint64) { ************************************/ func bitcheck32_constleft(a uint32) (n int) { - // amd64:"BTL\t[$]31" + // amd64:"BTL [$]31" if a&(1<<31) != 0 { return 1 } - // amd64:"BTL\t[$]28" + // amd64:"BTL [$]28" if a&(1<<28) != 0 { return 1 } - // amd64:"BTL\t[$]0" + // amd64:"BTL [$]0" if a&(1<<0) != 0 { return 1 } @@ -167,31 +167,31 @@ func bitcheck32_constleft(a uint32) (n int) { } func bitcheck32_constright(a [8]uint32) (n int) { - // amd64:"BTL\t[$]31" + // amd64:"BTL [$]31" if (a[0]>>31)&1 != 0 { return 1 } - // amd64:"BTL\t[$]31" + // amd64:"BTL [$]31" if a[1]>>31 != 0 { return 1 } - // amd64:"BTL\t[$]31" + // amd64:"BTL [$]31" if a[2]>>31 == 0 { return 1 } - // amd64:"BTL\t[$]28" + // amd64:"BTL [$]28" if (a[3]>>28)&1 == 0 { return 1 } - // amd64:"BTL\t[$]1" + // amd64:"BTL [$]1" if (a[4]>>1)&1 == 0 { return 1 } - // amd64:"BTL\t[$]0" + // amd64:"BTL [$]0" if (a[5]>>0)&1 == 0 { return 1 } - // amd64:"BTL\t[$]7" + // amd64:"BTL [$]7" if (a[6]>>5)&4 == 0 { return 1 } @@ -203,7 +203,7 @@ func bitcheck32_var(a, b uint32) (n int) { if a&(1<<(b&31)) != 0 { return 1 } - // amd64:"BTL",-"BT.\t[$]0" + // amd64:"BTL" -"BT. 
[$]0" if (b>>(a&31))&1 != 0 { return 1 } @@ -211,15 +211,15 @@ func bitcheck32_var(a, b uint32) (n int) { } func bitcheck32_mask(a uint32) (n int) { - // amd64:"BTL\t[$]31" + // amd64:"BTL [$]31" if a&0x80000000 != 0 { return 1 } - // amd64:"BTL\t[$]27" + // amd64:"BTL [$]27" if a&0x8000000 != 0 { return 1 } - // amd64:"BTL\t[$]0" + // amd64:"BTL [$]0" if a&0x1 != 0 { return 1 } @@ -230,13 +230,13 @@ func biton32(a, b uint32) (n uint32) { // amd64:"BTSL" n += b | (1 << (a & 31)) - // amd64:"ORL\t[$]-2147483648" + // amd64:"ORL [$]-2147483648" n += a | (1 << 31) - // amd64:"ORL\t[$]268435456" + // amd64:"ORL [$]268435456" n += a | (1 << 28) - // amd64:"ORL\t[$]1" + // amd64:"ORL [$]1" n += a | (1 << 0) return n @@ -246,13 +246,13 @@ func bitoff32(a, b uint32) (n uint32) { // amd64:"BTRL" n += b &^ (1 << (a & 31)) - // amd64:"ANDL\t[$]2147483647" + // amd64:"ANDL [$]2147483647" n += a &^ (1 << 31) - // amd64:"ANDL\t[$]-268435457" + // amd64:"ANDL [$]-268435457" n += a &^ (1 << 28) - // amd64:"ANDL\t[$]-2" + // amd64:"ANDL [$]-2" n += a &^ (1 << 0) return n @@ -262,13 +262,13 @@ func bitcompl32(a, b uint32) (n uint32) { // amd64:"BTCL" n += b ^ (1 << (a & 31)) - // amd64:"XORL\t[$]-2147483648" + // amd64:"XORL [$]-2147483648" n += a ^ (1 << 31) - // amd64:"XORL\t[$]268435456" + // amd64:"XORL [$]268435456" n += a ^ (1 << 28) - // amd64:"XORL\t[$]1" + // amd64:"XORL [$]1" n += a ^ (1 << 0) return n @@ -292,12 +292,12 @@ func bitcheckMostNegative(b uint8) bool { // Check AND masking on arm64 (Issue #19857) func and_mask_1(a uint64) uint64 { - // arm64:`AND\t` + // arm64:`AND ` return a & ((1 << 63) - 1) } func and_mask_2(a uint64) uint64 { - // arm64:`AND\t` + // arm64:`AND ` return a & (1 << 63) } @@ -312,65 +312,65 @@ func and_mask_3(a, b uint32) (uint32, uint32) { // Check generation of arm64 BIC/EON/ORN instructions func op_bic(x, y uint32) uint32 { - // arm64:`BIC\t`,-`AND` + // arm64:`BIC `,-`AND` return x &^ y } func op_eon(x, y, z uint32, a []uint32, n, m uint64) uint64 { - // arm64:`EON\t`,-`EOR`,-`MVN` + // arm64:`EON `,-`EOR`,-`MVN` a[0] = x ^ (y ^ 0xffffffff) - // arm64:`EON\t`,-`EOR`,-`MVN` + // arm64:`EON `,-`EOR`,-`MVN` a[1] = ^(y ^ z) - // arm64:`EON\t`,-`XOR` + // arm64:`EON `,-`XOR` a[2] = x ^ ^z - // arm64:`EON\t`,-`EOR`,-`MVN` + // arm64:`EON `,-`EOR`,-`MVN` return n ^ (m ^ 0xffffffffffffffff) } func op_orn(x, y uint32) uint32 { - // arm64:`ORN\t`,-`ORR` - // loong64:"ORN"\t,-"OR\t" + // arm64:`ORN `,-`ORR` + // loong64:"ORN" ,-"OR " return x | ^y } func op_nor(x int64, a []int64) { - // loong64: "MOVV\t[$]0","NOR\tR" + // loong64: "MOVV [$]0" "NOR R" a[0] = ^(0x1234 | x) - // loong64:"NOR",-"XOR" + // loong64:"NOR" -"XOR" a[1] = (-1) ^ x - // loong64: "MOVV\t[$]-55",-"OR",-"NOR" + // loong64: "MOVV [$]-55" -"OR" -"NOR" a[2] = ^(0x12 | 0x34) } func op_andn(x, y uint32) uint32 { - // loong64:"ANDN\t",-"AND\t" + // loong64:"ANDN " -"AND " return x &^ y } // check bitsets func bitSetPowerOf2Test(x int) bool { - // amd64:"BTL\t[$]3" + // amd64:"BTL [$]3" return x&8 == 8 } func bitSetTest(x int) bool { - // amd64:"ANDL\t[$]9, AX" - // amd64:"CMPQ\tAX, [$]9" + // amd64:"ANDL [$]9, AX" + // amd64:"CMPQ AX, [$]9" return x&9 == 9 } // mask contiguous one bits func cont1Mask64U(x uint64) uint64 { - // s390x:"RISBGZ\t[$]16, [$]47, [$]0," + // s390x:"RISBGZ [$]16, [$]47, [$]0," return x & 0x0000ffffffff0000 } // mask contiguous zero bits func cont0Mask64U(x uint64) uint64 { - // s390x:"RISBGZ\t[$]48, [$]15, [$]0," + // s390x:"RISBGZ [$]48, [$]15, [$]0," return x & 0xffff00000000ffff } 
@@ -390,60 +390,60 @@ func issue48467(x, y uint64) uint64 { } func foldConst(x, y uint64) uint64 { - // arm64: "ADDS\t[$]7",-"MOVD\t[$]7" - // ppc64x: "ADDC\t[$]7," + // arm64: "ADDS [$]7" -"MOVD [$]7" + // ppc64x: "ADDC [$]7," d, b := bits.Add64(x, 7, 0) return b & d } func foldConstOutOfRange(a uint64) uint64 { - // arm64: "MOVD\t[$]19088744",-"ADD\t[$]19088744" + // arm64: "MOVD [$]19088744" -"ADD [$]19088744" return a + 0x1234568 } // Verify sign-extended values are not zero-extended under a bit mask (#61297) func signextendAndMask8to64(a int8) (s, z uint64) { - // ppc64x: "MOVB", "ANDCC\t[$]1015," + // ppc64x: "MOVB", "ANDCC [$]1015," s = uint64(a) & 0x3F7 - // ppc64x: -"MOVB", "ANDCC\t[$]247," + // ppc64x: -"MOVB", "ANDCC [$]247," z = uint64(uint8(a)) & 0x3F7 return } // Verify zero-extended values are not sign-extended under a bit mask (#61297) func zeroextendAndMask8to64(a int8, b int16) (x, y uint64) { - // ppc64x: -"MOVB\t", -"ANDCC", "MOVBZ" + // ppc64x: -"MOVB ", -"ANDCC", "MOVBZ" x = uint64(a) & 0xFF - // ppc64x: -"MOVH\t", -"ANDCC", "MOVHZ" + // ppc64x: -"MOVH ", -"ANDCC", "MOVHZ" y = uint64(b) & 0xFFFF return } // Verify rotate and mask instructions, and further simplified instructions for small types func bitRotateAndMask(io64 [8]uint64, io32 [4]uint32, io16 [4]uint16, io8 [4]uint8) { - // ppc64x: "RLDICR\t[$]0, R[0-9]*, [$]47, R" + // ppc64x: "RLDICR [$]0, R[0-9]*, [$]47, R" io64[0] = io64[0] & 0xFFFFFFFFFFFF0000 - // ppc64x: "RLDICL\t[$]0, R[0-9]*, [$]16, R" + // ppc64x: "RLDICL [$]0, R[0-9]*, [$]16, R" io64[1] = io64[1] & 0x0000FFFFFFFFFFFF - // ppc64x: -"SRD", -"AND", "RLDICL\t[$]60, R[0-9]*, [$]16, R" + // ppc64x: -"SRD", -"AND", "RLDICL [$]60, R[0-9]*, [$]16, R" io64[2] = (io64[2] >> 4) & 0x0000FFFFFFFFFFFF - // ppc64x: -"SRD", -"AND", "RLDICL\t[$]36, R[0-9]*, [$]28, R" + // ppc64x: -"SRD", -"AND", "RLDICL [$]36, R[0-9]*, [$]28, R" io64[3] = (io64[3] >> 28) & 0x0000FFFFFFFFFFFF - // ppc64x: "MOVWZ", "RLWNM\t[$]1, R[0-9]*, [$]28, [$]3, R" + // ppc64x: "MOVWZ", "RLWNM [$]1, R[0-9]*, [$]28, [$]3, R" io64[4] = uint64(bits.RotateLeft32(io32[0], 1) & 0xF000000F) - // ppc64x: "RLWNM\t[$]0, R[0-9]*, [$]4, [$]19, R" + // ppc64x: "RLWNM [$]0, R[0-9]*, [$]4, [$]19, R" io32[0] = io32[0] & 0x0FFFF000 - // ppc64x: "RLWNM\t[$]0, R[0-9]*, [$]20, [$]3, R" + // ppc64x: "RLWNM [$]0, R[0-9]*, [$]20, [$]3, R" io32[1] = io32[1] & 0xF0000FFF // ppc64x: -"RLWNM", MOVD, AND io32[2] = io32[2] & 0xFFFF0002 var bigc uint32 = 0x12345678 - // ppc64x: "ANDCC\t[$]22136" + // ppc64x: "ANDCC [$]22136" io16[0] = io16[0] & uint16(bigc) - // ppc64x: "ANDCC\t[$]120" + // ppc64x: "ANDCC [$]120" io8[0] = io8[0] & uint8(bigc) } diff --git a/test/codegen/bmi.go b/test/codegen/bmi.go index aa61b03928a..c50f15a7e0c 100644 --- a/test/codegen/bmi.go +++ b/test/codegen/bmi.go @@ -47,41 +47,41 @@ func blsr32(x int32) int32 { } func isPowerOfTwo64(x int64) bool { - // amd64/v3:"BLSRQ",-"TESTQ",-"CALL" + // amd64/v3:"BLSRQ" -"TESTQ" -"CALL" return blsr64(x) == 0 } func isPowerOfTwo32(x int32) bool { - // amd64/v3:"BLSRL",-"TESTL",-"CALL" + // amd64/v3:"BLSRL" -"TESTL" -"CALL" return blsr32(x) == 0 } func isPowerOfTwoSelect64(x, a, b int64) int64 { var r int64 - // amd64/v3:"BLSRQ",-"TESTQ",-"CALL" + // amd64/v3:"BLSRQ" -"TESTQ" -"CALL" if isPowerOfTwo64(x) { r = a } else { r = b } - // amd64/v3:"CMOVQEQ",-"TESTQ",-"CALL" + // amd64/v3:"CMOVQEQ" -"TESTQ" -"CALL" return r * 2 // force return blocks joining } func isPowerOfTwoSelect32(x, a, b int32) int32 { var r int32 - // amd64/v3:"BLSRL",-"TESTL",-"CALL" + // 
amd64/v3:"BLSRL" -"TESTL" -"CALL" if isPowerOfTwo32(x) { r = a } else { r = b } - // amd64/v3:"CMOVLEQ",-"TESTL",-"CALL" + // amd64/v3:"CMOVLEQ" -"TESTL" -"CALL" return r * 2 // force return blocks joining } func isPowerOfTwoBranch64(x int64, a func(bool), b func(string)) { - // amd64/v3:"BLSRQ",-"TESTQ",-"CALL" + // amd64/v3:"BLSRQ" -"TESTQ" -"CALL" if isPowerOfTwo64(x) { a(true) } else { @@ -90,7 +90,7 @@ func isPowerOfTwoBranch64(x int64, a func(bool), b func(string)) { } func isPowerOfTwoBranch32(x int32, a func(bool), b func(string)) { - // amd64/v3:"BLSRL",-"TESTL",-"CALL" + // amd64/v3:"BLSRL" -"TESTL" -"CALL" if isPowerOfTwo32(x) { a(true) } else { @@ -99,41 +99,41 @@ func isPowerOfTwoBranch32(x int32, a func(bool), b func(string)) { } func isNotPowerOfTwo64(x int64) bool { - // amd64/v3:"BLSRQ",-"TESTQ",-"CALL" + // amd64/v3:"BLSRQ" -"TESTQ" -"CALL" return blsr64(x) != 0 } func isNotPowerOfTwo32(x int32) bool { - // amd64/v3:"BLSRL",-"TESTL",-"CALL" + // amd64/v3:"BLSRL" -"TESTL" -"CALL" return blsr32(x) != 0 } func isNotPowerOfTwoSelect64(x, a, b int64) int64 { var r int64 - // amd64/v3:"BLSRQ",-"TESTQ",-"CALL" + // amd64/v3:"BLSRQ" -"TESTQ" -"CALL" if isNotPowerOfTwo64(x) { r = a } else { r = b } - // amd64/v3:"CMOVQNE",-"TESTQ",-"CALL" + // amd64/v3:"CMOVQNE" -"TESTQ" -"CALL" return r * 2 // force return blocks joining } func isNotPowerOfTwoSelect32(x, a, b int32) int32 { var r int32 - // amd64/v3:"BLSRL",-"TESTL",-"CALL" + // amd64/v3:"BLSRL" -"TESTL" -"CALL" if isNotPowerOfTwo32(x) { r = a } else { r = b } - // amd64/v3:"CMOVLNE",-"TESTL",-"CALL" + // amd64/v3:"CMOVLNE" -"TESTL" -"CALL" return r * 2 // force return blocks joining } func isNotPowerOfTwoBranch64(x int64, a func(bool), b func(string)) { - // amd64/v3:"BLSRQ",-"TESTQ",-"CALL" + // amd64/v3:"BLSRQ" -"TESTQ" -"CALL" if isNotPowerOfTwo64(x) { a(true) } else { @@ -142,7 +142,7 @@ func isNotPowerOfTwoBranch64(x int64, a func(bool), b func(string)) { } func isNotPowerOfTwoBranch32(x int32, a func(bool), b func(string)) { - // amd64/v3:"BLSRL",-"TESTL",-"CALL" + // amd64/v3:"BLSRL" -"TESTL" -"CALL" if isNotPowerOfTwo32(x) { a(true) } else { @@ -161,17 +161,17 @@ func sarx32(x, y int32) int32 { } func sarx64_load(x []int64, i int) int64 { - // amd64/v3: `SARXQ\t[A-Z]+[0-9]*, \([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` + // amd64/v3: `SARXQ [A-Z]+[0-9]*, \([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` s := x[i] >> (i & 63) - // amd64/v3: `SARXQ\t[A-Z]+[0-9]*, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` + // amd64/v3: `SARXQ [A-Z]+[0-9]*, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` s = x[i+1] >> (s & 63) return s } func sarx32_load(x []int32, i int) int32 { - // amd64/v3: `SARXL\t[A-Z]+[0-9]*, \([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` + // amd64/v3: `SARXL [A-Z]+[0-9]*, \([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` s := x[i] >> (i & 63) - // amd64/v3: `SARXL\t[A-Z]+[0-9]*, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` + // amd64/v3: `SARXL [A-Z]+[0-9]*, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` s = x[i+1] >> (s & 63) return s } @@ -193,17 +193,17 @@ func shlrx32(x, y uint32) uint32 { } func shlrx64_load(x []uint64, i int, s uint64) uint64 { - // amd64/v3: `SHRXQ\t[A-Z]+[0-9]*, \([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` + // amd64/v3: `SHRXQ [A-Z]+[0-9]*, \([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` s = x[i] >> i - // amd64/v3: `SHLXQ\t[A-Z]+[0-9]*, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` + // amd64/v3: `SHLXQ [A-Z]+[0-9]*, 
8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` s = x[i+1] << s return s } func shlrx32_load(x []uint32, i int, s uint32) uint32 { - // amd64/v3: `SHRXL\t[A-Z]+[0-9]*, \([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` + // amd64/v3: `SHRXL [A-Z]+[0-9]*, \([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` s = x[i] >> i - // amd64/v3: `SHLXL\t[A-Z]+[0-9]*, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` + // amd64/v3: `SHLXL [A-Z]+[0-9]*, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` s = x[i+1] << s return s } diff --git a/test/codegen/bool.go b/test/codegen/bool.go index 760dbbcf7b5..37f85a45b71 100644 --- a/test/codegen/bool.go +++ b/test/codegen/bool.go @@ -13,52 +13,52 @@ import ( // This file contains codegen tests related to boolean simplifications/optimizations. func convertNeq0B(x uint8, c bool) bool { - // amd64:"ANDL\t[$]1",-"SETNE" - // ppc64x:"RLDICL",-"CMPW",-"ISEL" + // amd64:"ANDL [$]1" -"SETNE" + // ppc64x:"RLDICL" -"CMPW" -"ISEL" b := x&1 != 0 return c && b } func convertNeq0W(x uint16, c bool) bool { - // amd64:"ANDL\t[$]1",-"SETNE" - // ppc64x:"RLDICL",-"CMPW",-"ISEL" + // amd64:"ANDL [$]1" -"SETNE" + // ppc64x:"RLDICL" -"CMPW" -"ISEL" b := x&1 != 0 return c && b } func convertNeq0L(x uint32, c bool) bool { - // amd64:"ANDL\t[$]1",-"SETB" - // ppc64x:"RLDICL",-"CMPW",-"ISEL" + // amd64:"ANDL [$]1" -"SETB" + // ppc64x:"RLDICL" -"CMPW" -"ISEL" b := x&1 != 0 return c && b } func convertNeq0Q(x uint64, c bool) bool { - // amd64:"ANDL\t[$]1",-"SETB" - // ppc64x:"RLDICL",-"CMP",-"ISEL" + // amd64:"ANDL [$]1" -"SETB" + // ppc64x:"RLDICL" -"CMP" -"ISEL" b := x&1 != 0 return c && b } func convertNeqBool32(x uint32) bool { - // ppc64x:"RLDICL",-"CMPW",-"ISEL" + // ppc64x:"RLDICL" -"CMPW" -"ISEL" return x&1 != 0 } func convertEqBool32(x uint32) bool { - // ppc64x:"RLDICL",-"CMPW","XOR",-"ISEL" - // amd64:"ANDL","XORL",-"BTL",-"SETCC" + // ppc64x:"RLDICL" -"CMPW" "XOR" -"ISEL" + // amd64:"ANDL" "XORL" -"BTL" -"SETCC" return x&1 == 0 } func convertNeqBool64(x uint64) bool { - // ppc64x:"RLDICL",-"CMP",-"ISEL" + // ppc64x:"RLDICL" -"CMP" -"ISEL" return x&1 != 0 } func convertEqBool64(x uint64) bool { - // ppc64x:"RLDICL","XOR",-"CMP",-"ISEL" - // amd64:"ANDL","XORL",-"BTL",-"SETCC" + // ppc64x:"RLDICL" "XOR" -"CMP" -"ISEL" + // amd64:"ANDL" "XORL" -"BTL" -"SETCC" return x&1 == 0 } @@ -87,157 +87,157 @@ func phiOr(a, b bool) bool { } func TestSetEq64(x uint64, y uint64) bool { - // ppc64x/power10:"SETBC\tCR0EQ",-"ISEL" - // ppc64x/power9:"CMP","ISEL",-"SETBC\tCR0EQ" - // ppc64x/power8:"CMP","ISEL",-"SETBC\tCR0EQ" + // ppc64x/power10:"SETBC CR0EQ" -"ISEL" + // ppc64x/power9:"CMP" "ISEL" -"SETBC CR0EQ" + // ppc64x/power8:"CMP" "ISEL" -"SETBC CR0EQ" b := x == y return b } func TestSetNeq64(x uint64, y uint64) bool { - // ppc64x/power10:"SETBCR\tCR0EQ",-"ISEL" - // ppc64x/power9:"CMP","ISEL",-"SETBCR\tCR0EQ" - // ppc64x/power8:"CMP","ISEL",-"SETBCR\tCR0EQ" + // ppc64x/power10:"SETBCR CR0EQ" -"ISEL" + // ppc64x/power9:"CMP" "ISEL" -"SETBCR CR0EQ" + // ppc64x/power8:"CMP" "ISEL" -"SETBCR CR0EQ" b := x != y return b } func TestSetLt64(x uint64, y uint64) bool { - // ppc64x/power10:"SETBC\tCR0GT",-"ISEL" - // ppc64x/power9:"CMP","ISEL",-"SETBC\tCR0GT" - // ppc64x/power8:"CMP","ISEL",-"SETBC\tCR0GT" + // ppc64x/power10:"SETBC CR0GT" -"ISEL" + // ppc64x/power9:"CMP" "ISEL" -"SETBC CR0GT" + // ppc64x/power8:"CMP" "ISEL" -"SETBC CR0GT" b := x < y return b } func TestSetLe64(x uint64, y uint64) bool { - // ppc64x/power10:"SETBCR\tCR0LT",-"ISEL" - // 
ppc64x/power9:"CMP","ISEL",-"SETBCR\tCR0LT" - // ppc64x/power8:"CMP","ISEL",-"SETBCR\tCR0LT" + // ppc64x/power10:"SETBCR CR0LT" -"ISEL" + // ppc64x/power9:"CMP" "ISEL" -"SETBCR CR0LT" + // ppc64x/power8:"CMP" "ISEL" -"SETBCR CR0LT" b := x <= y return b } func TestSetGt64(x uint64, y uint64) bool { - // ppc64x/power10:"SETBC\tCR0LT",-"ISEL" - // ppc64x/power9:"CMP","ISEL",-"SETBC\tCR0LT" - // ppc64x/power8:"CMP","ISEL",-"SETBC\tCR0LT" + // ppc64x/power10:"SETBC CR0LT" -"ISEL" + // ppc64x/power9:"CMP" "ISEL" -"SETBC CR0LT" + // ppc64x/power8:"CMP" "ISEL" -"SETBC CR0LT" b := x > y return b } func TestSetGe64(x uint64, y uint64) bool { - // ppc64x/power10:"SETBCR\tCR0GT",-"ISEL" - // ppc64x/power9:"CMP","ISEL",-"SETBCR\tCR0GT" - // ppc64x/power8:"CMP","ISEL",-"SETBCR\tCR0GT" + // ppc64x/power10:"SETBCR CR0GT" -"ISEL" + // ppc64x/power9:"CMP" "ISEL" -"SETBCR CR0GT" + // ppc64x/power8:"CMP" "ISEL" -"SETBCR CR0GT" b := x >= y return b } func TestSetLtFp64(x float64, y float64) bool { - // ppc64x/power10:"SETBC\tCR0LT",-"ISEL" - // ppc64x/power9:"FCMP","ISEL",-"SETBC\tCR0LT" - // ppc64x/power8:"FCMP","ISEL",-"SETBC\tCR0LT" + // ppc64x/power10:"SETBC CR0LT" -"ISEL" + // ppc64x/power9:"FCMP" "ISEL" -"SETBC CR0LT" + // ppc64x/power8:"FCMP" "ISEL" -"SETBC CR0LT" b := x < y return b } func TestSetLeFp64(x float64, y float64) bool { - // ppc64x/power10:"SETBC\tCR0LT","SETBC\tCR0EQ","OR",-"ISEL",-"ISEL" - // ppc64x/power9:"ISEL","ISEL",-"SETBC\tCR0LT",-"SETBC\tCR0EQ","OR" - // ppc64x/power8:"ISEL","ISEL",-"SETBC\tCR0LT",-"SETBC\tCR0EQ","OR" + // ppc64x/power10:"SETBC CR0LT" "SETBC CR0EQ" "OR" -"ISEL" -"ISEL" + // ppc64x/power9:"ISEL" "ISEL" -"SETBC CR0LT" -"SETBC CR0EQ" "OR" + // ppc64x/power8:"ISEL" "ISEL" -"SETBC CR0LT" -"SETBC CR0EQ" "OR" b := x <= y return b } func TestSetGtFp64(x float64, y float64) bool { - // ppc64x/power10:"SETBC\tCR0LT",-"ISEL" - // ppc64x/power9:"FCMP","ISEL",-"SETBC\tCR0LT" - // ppc64x/power8:"FCMP","ISEL",-"SETBC\tCR0LT" + // ppc64x/power10:"SETBC CR0LT" -"ISEL" + // ppc64x/power9:"FCMP" "ISEL" -"SETBC CR0LT" + // ppc64x/power8:"FCMP" "ISEL" -"SETBC CR0LT" b := x > y return b } func TestSetGeFp64(x float64, y float64) bool { - // ppc64x/power10:"SETBC\tCR0LT","SETBC\tCR0EQ","OR",-"ISEL",-"ISEL" - // ppc64x/power9:"ISEL","ISEL",-"SETBC\tCR0LT",-"SETBC\tCR0EQ","OR" - // ppc64x/power8:"ISEL","ISEL",-"SETBC\tCR0LT",-"SETBC\tCR0EQ","OR" + // ppc64x/power10:"SETBC CR0LT" "SETBC CR0EQ" "OR" -"ISEL" -"ISEL" + // ppc64x/power9:"ISEL" "ISEL" -"SETBC CR0LT" -"SETBC CR0EQ" "OR" + // ppc64x/power8:"ISEL" "ISEL" -"SETBC CR0LT" -"SETBC CR0EQ" "OR" b := x >= y return b } func TestSetInvEq64(x uint64, y uint64) bool { - // ppc64x/power10:"SETBCR\tCR0EQ",-"ISEL" - // ppc64x/power9:"CMP","ISEL",-"SETBCR\tCR0EQ" - // ppc64x/power8:"CMP","ISEL",-"SETBCR\tCR0EQ" + // ppc64x/power10:"SETBCR CR0EQ" -"ISEL" + // ppc64x/power9:"CMP" "ISEL" -"SETBCR CR0EQ" + // ppc64x/power8:"CMP" "ISEL" -"SETBCR CR0EQ" b := !(x == y) return b } func TestSetInvNeq64(x uint64, y uint64) bool { - // ppc64x/power10:"SETBC\tCR0EQ",-"ISEL" - // ppc64x/power9:"CMP","ISEL",-"SETBC\tCR0EQ" - // ppc64x/power8:"CMP","ISEL",-"SETBC\tCR0EQ" + // ppc64x/power10:"SETBC CR0EQ" -"ISEL" + // ppc64x/power9:"CMP" "ISEL" -"SETBC CR0EQ" + // ppc64x/power8:"CMP" "ISEL" -"SETBC CR0EQ" b := !(x != y) return b } func TestSetInvLt64(x uint64, y uint64) bool { - // ppc64x/power10:"SETBCR\tCR0GT",-"ISEL" - // ppc64x/power9:"CMP","ISEL",-"SETBCR\tCR0GT" - // ppc64x/power8:"CMP","ISEL",-"SETBCR\tCR0GT" + // ppc64x/power10:"SETBCR CR0GT" -"ISEL" + 
// ppc64x/power9:"CMP" "ISEL" -"SETBCR CR0GT" + // ppc64x/power8:"CMP" "ISEL" -"SETBCR CR0GT" b := !(x < y) return b } func TestSetInvLe64(x uint64, y uint64) bool { - // ppc64x/power10:"SETBC\tCR0LT",-"ISEL" - // ppc64x/power9:"CMP","ISEL",-"SETBC\tCR0LT" - // ppc64x/power8:"CMP","ISEL",-"SETBC\tCR0LT" + // ppc64x/power10:"SETBC CR0LT" -"ISEL" + // ppc64x/power9:"CMP" "ISEL" -"SETBC CR0LT" + // ppc64x/power8:"CMP" "ISEL" -"SETBC CR0LT" b := !(x <= y) return b } func TestSetInvGt64(x uint64, y uint64) bool { - // ppc64x/power10:"SETBCR\tCR0LT",-"ISEL" - // ppc64x/power9:"CMP","ISEL",-"SETBCR\tCR0LT" - // ppc64x/power8:"CMP","ISEL",-"SETBCR\tCR0LT" + // ppc64x/power10:"SETBCR CR0LT" -"ISEL" + // ppc64x/power9:"CMP" "ISEL" -"SETBCR CR0LT" + // ppc64x/power8:"CMP" "ISEL" -"SETBCR CR0LT" b := !(x > y) return b } func TestSetInvGe64(x uint64, y uint64) bool { - // ppc64x/power10:"SETBC\tCR0GT",-"ISEL" - // ppc64x/power9:"CMP","ISEL",-"SETBC\tCR0GT" - // ppc64x/power8:"CMP","ISEL",-"SETBC\tCR0GT" + // ppc64x/power10:"SETBC CR0GT" -"ISEL" + // ppc64x/power9:"CMP" "ISEL" -"SETBC CR0GT" + // ppc64x/power8:"CMP" "ISEL" -"SETBC CR0GT" b := !(x >= y) return b } func TestSetInvEqFp64(x float64, y float64) bool { - // ppc64x/power10:"SETBCR\tCR0EQ",-"ISEL" - // ppc64x/power9:"FCMP","ISEL",-"SETBCR\tCR0EQ" - // ppc64x/power8:"FCMP","ISEL",-"SETBCR\tCR0EQ" + // ppc64x/power10:"SETBCR CR0EQ" -"ISEL" + // ppc64x/power9:"FCMP" "ISEL" -"SETBCR CR0EQ" + // ppc64x/power8:"FCMP" "ISEL" -"SETBCR CR0EQ" b := !(x == y) return b } func TestSetInvNeqFp64(x float64, y float64) bool { - // ppc64x/power10:"SETBC\tCR0EQ",-"ISEL" - // ppc64x/power9:"FCMP","ISEL",-"SETBC\tCR0EQ" - // ppc64x/power8:"FCMP","ISEL",-"SETBC\tCR0EQ" + // ppc64x/power10:"SETBC CR0EQ" -"ISEL" + // ppc64x/power9:"FCMP" "ISEL" -"SETBC CR0EQ" + // ppc64x/power8:"FCMP" "ISEL" -"SETBC CR0EQ" b := !(x != y) return b } func TestSetInvLtFp64(x float64, y float64) bool { - // ppc64x/power10:"SETBCR\tCR0LT",-"ISEL" - // ppc64x/power9:"FCMP","ISEL",-"SETBCR\tCR0LT" - // ppc64x/power8:"FCMP","ISEL",-"SETBCR\tCR0LT" + // ppc64x/power10:"SETBCR CR0LT" -"ISEL" + // ppc64x/power9:"FCMP" "ISEL" -"SETBCR CR0LT" + // ppc64x/power8:"FCMP" "ISEL" -"SETBCR CR0LT" b := !(x < y) return b } func TestSetInvLeFp64(x float64, y float64) bool { - // ppc64x/power10:"SETBC\tCR0LT",-"ISEL" - // ppc64x/power9:"FCMP","ISEL",-"SETBC\tCR0LT" - // ppc64x/power8:"FCMP","ISEL",-"SETBC\tCR0LT" + // ppc64x/power10:"SETBC CR0LT" -"ISEL" + // ppc64x/power9:"FCMP" "ISEL" -"SETBC CR0LT" + // ppc64x/power8:"FCMP" "ISEL" -"SETBC CR0LT" b := !(x <= y) return b } func TestSetInvGtFp64(x float64, y float64) bool { - // ppc64x/power10:"SETBCR\tCR0LT",-"ISEL" - // ppc64x/power9:"FCMP","ISEL",-"SETBCR\tCR0LT" - // ppc64x/power8:"FCMP","ISEL",-"SETBCR\tCR0LT" + // ppc64x/power10:"SETBCR CR0LT" -"ISEL" + // ppc64x/power9:"FCMP" "ISEL" -"SETBCR CR0LT" + // ppc64x/power8:"FCMP" "ISEL" -"SETBCR CR0LT" b := !(x > y) return b } func TestSetInvGeFp64(x float64, y float64) bool { - // ppc64x/power10:"SETBC\tCR0LT",-"ISEL" - // ppc64x/power9:"FCMP","ISEL",-"SETBC\tCR0LT" - // ppc64x/power8:"FCMP","ISEL",-"SETBC\tCR0LT" + // ppc64x/power10:"SETBC CR0LT" -"ISEL" + // ppc64x/power9:"FCMP" "ISEL" -"SETBC CR0LT" + // ppc64x/power8:"FCMP" "ISEL" -"SETBC CR0LT" b := !(x >= y) return b } @@ -293,7 +293,7 @@ func TestLogicalCompareZero(x *[64]uint64) { x[12] = b } - // ppc64x:"ADDCCC\t[$]4," + // ppc64x:"ADDCCC [$]4," c := int64(x[12]) + 4 if c <= 0 { x[12] = uint64(c) @@ -309,7 +309,7 @@ func TestLogicalCompareZero(x 
*[64]uint64) { func constantWrite(b bool, p *bool) { if b { - // amd64:`MOVB\t[$]1, \(` + // amd64:`MOVB [$]1, \(` *p = b } } diff --git a/test/codegen/clobberdead.go b/test/codegen/clobberdead.go index 13d2efbbe58..df44705cfc5 100644 --- a/test/codegen/clobberdead.go +++ b/test/codegen/clobberdead.go @@ -15,16 +15,16 @@ var p1, p2, p3 T func F() { // 3735936685 is 0xdeaddead. On ARM64 R27 is REGTMP. // clobber x, y at entry. not clobber z (stack object). - // amd64:`MOVL\t\$3735936685, command-line-arguments\.x`, `MOVL\t\$3735936685, command-line-arguments\.y`, -`MOVL\t\$3735936685, command-line-arguments\.z` - // arm64:`MOVW\tR27, command-line-arguments\.x`, `MOVW\tR27, command-line-arguments\.y`, -`MOVW\tR27, command-line-arguments\.z` + // amd64:`MOVL \$3735936685, command-line-arguments\.x`, `MOVL \$3735936685, command-line-arguments\.y`, -`MOVL \$3735936685, command-line-arguments\.z` + // arm64:`MOVW R27, command-line-arguments\.x`, `MOVW R27, command-line-arguments\.y`, -`MOVW R27, command-line-arguments\.z` x, y, z := p1, p2, p3 addrTaken(&z) // x is dead at the call (the value of x is loaded before the CALL), y is not - // amd64:`MOVL\t\$3735936685, command-line-arguments\.x`, -`MOVL\t\$3735936685, command-line-arguments\.y` - // arm64:`MOVW\tR27, command-line-arguments\.x`, -`MOVW\tR27, command-line-arguments\.y` + // amd64:`MOVL \$3735936685, command-line-arguments\.x`, -`MOVL \$3735936685, command-line-arguments\.y` + // arm64:`MOVW R27, command-line-arguments\.x`, -`MOVW R27, command-line-arguments\.y` use(x) - // amd64:`MOVL\t\$3735936685, command-line-arguments\.x`, `MOVL\t\$3735936685, command-line-arguments\.y` - // arm64:`MOVW\tR27, command-line-arguments\.x`, `MOVW\tR27, command-line-arguments\.y` + // amd64:`MOVL \$3735936685, command-line-arguments\.x`, `MOVL \$3735936685, command-line-arguments\.y` + // arm64:`MOVW R27, command-line-arguments\.x`, `MOVW R27, command-line-arguments\.y` use(y) } diff --git a/test/codegen/clobberdeadreg.go b/test/codegen/clobberdeadreg.go index 39c4a743cb9..24043438b2e 100644 --- a/test/codegen/clobberdeadreg.go +++ b/test/codegen/clobberdeadreg.go @@ -14,14 +14,14 @@ type S struct { func F(a, b, c int, d S) { // -2401018187971961171 is 0xdeaddeaddeaddead - // amd64:`MOVQ\t\$-2401018187971961171, AX`, `MOVQ\t\$-2401018187971961171, BX`, `MOVQ\t\$-2401018187971961171, CX` - // amd64:`MOVQ\t\$-2401018187971961171, DX`, `MOVQ\t\$-2401018187971961171, SI`, `MOVQ\t\$-2401018187971961171, DI` - // amd64:`MOVQ\t\$-2401018187971961171, R8`, `MOVQ\t\$-2401018187971961171, R9`, `MOVQ\t\$-2401018187971961171, R10` - // amd64:`MOVQ\t\$-2401018187971961171, R11`, `MOVQ\t\$-2401018187971961171, R12`, `MOVQ\t\$-2401018187971961171, R13` - // amd64:-`MOVQ\t\$-2401018187971961171, BP` // frame pointer is not clobbered + // amd64:`MOVQ \$-2401018187971961171, AX`, `MOVQ \$-2401018187971961171, BX`, `MOVQ \$-2401018187971961171, CX` + // amd64:`MOVQ \$-2401018187971961171, DX`, `MOVQ \$-2401018187971961171, SI`, `MOVQ \$-2401018187971961171, DI` + // amd64:`MOVQ \$-2401018187971961171, R8`, `MOVQ \$-2401018187971961171, R9`, `MOVQ \$-2401018187971961171, R10` + // amd64:`MOVQ \$-2401018187971961171, R11`, `MOVQ \$-2401018187971961171, R12`, `MOVQ \$-2401018187971961171, R13` + // amd64:-`MOVQ \$-2401018187971961171, BP` // frame pointer is not clobbered StackArgsCall([10]int{a, b, c}) - // amd64:`MOVQ\t\$-2401018187971961171, R12`, `MOVQ\t\$-2401018187971961171, R13`, `MOVQ\t\$-2401018187971961171, DX` - // amd64:-`MOVQ\t\$-2401018187971961171, AX`, 
-`MOVQ\t\$-2401018187971961171, R11` // register args are not clobbered + // amd64:`MOVQ \$-2401018187971961171, R12`, `MOVQ \$-2401018187971961171, R13`, `MOVQ \$-2401018187971961171, DX` + // amd64:-`MOVQ \$-2401018187971961171, AX`, -`MOVQ \$-2401018187971961171, R11` // register args are not clobbered RegArgsCall(a, b, c, d) } diff --git a/test/codegen/compare_and_branch.go b/test/codegen/compare_and_branch.go index 759dd263581..7ffd25ced81 100644 --- a/test/codegen/compare_and_branch.go +++ b/test/codegen/compare_and_branch.go @@ -11,12 +11,12 @@ func dummy() {} // Signed 64-bit compare-and-branch. func si64(x, y chan int64) { - // s390x:"CGRJ\t[$](2|4), R[0-9]+, R[0-9]+, " + // s390x:"CGRJ [$](2|4), R[0-9]+, R[0-9]+, " for <-x < <-y { dummy() } - // s390x:"CL?GRJ\t[$]8, R[0-9]+, R[0-9]+, " + // s390x:"CL?GRJ [$]8, R[0-9]+, R[0-9]+, " for <-x == <-y { dummy() } @@ -25,22 +25,22 @@ func si64(x, y chan int64) { // Signed 64-bit compare-and-branch with 8-bit immediate. func si64x8(doNotOptimize int64) { // take in doNotOptimize as an argument to avoid the loops being rewritten to count down - // s390x:"CGIJ\t[$]12, R[0-9]+, [$]127, " + // s390x:"CGIJ [$]12, R[0-9]+, [$]127, " for i := doNotOptimize; i < 128; i++ { dummy() } - // s390x:"CGIJ\t[$]10, R[0-9]+, [$]-128, " + // s390x:"CGIJ [$]10, R[0-9]+, [$]-128, " for i := doNotOptimize; i > -129; i-- { dummy() } - // s390x:"CGIJ\t[$]2, R[0-9]+, [$]127, " + // s390x:"CGIJ [$]2, R[0-9]+, [$]127, " for i := doNotOptimize; i >= 128; i++ { dummy() } - // s390x:"CGIJ\t[$]4, R[0-9]+, [$]-128, " + // s390x:"CGIJ [$]4, R[0-9]+, [$]-128, " for i := doNotOptimize; i <= -129; i-- { dummy() } @@ -48,12 +48,12 @@ func si64x8(doNotOptimize int64) { // Unsigned 64-bit compare-and-branch. func ui64(x, y chan uint64) { - // s390x:"CLGRJ\t[$](2|4), R[0-9]+, R[0-9]+, " + // s390x:"CLGRJ [$](2|4), R[0-9]+, R[0-9]+, " for <-x > <-y { dummy() } - // s390x:"CL?GRJ\t[$]6, R[0-9]+, R[0-9]+, " + // s390x:"CL?GRJ [$]6, R[0-9]+, R[0-9]+, " for <-x != <-y { dummy() } @@ -61,22 +61,22 @@ func ui64(x, y chan uint64) { // Unsigned 64-bit comparison with 8-bit immediate. func ui64x8() { - // s390x:"CLGIJ\t[$]4, R[0-9]+, [$]128, " + // s390x:"CLGIJ [$]4, R[0-9]+, [$]128, " for i := uint64(0); i < 128; i++ { dummy() } - // s390x:"CLGIJ\t[$]12, R[0-9]+, [$]255, " + // s390x:"CLGIJ [$]12, R[0-9]+, [$]255, " for i := uint64(0); i < 256; i++ { dummy() } - // s390x:"CLGIJ\t[$]2, R[0-9]+, [$]255, " + // s390x:"CLGIJ [$]2, R[0-9]+, [$]255, " for i := uint64(257); i >= 256; i-- { dummy() } - // s390x:"CLGIJ\t[$]2, R[0-9]+, [$]0, " + // s390x:"CLGIJ [$]2, R[0-9]+, [$]0, " for i := uint64(1024); i > 0; i-- { dummy() } @@ -84,12 +84,12 @@ func ui64x8() { // Signed 32-bit compare-and-branch. func si32(x, y chan int32) { - // s390x:"CRJ\t[$](2|4), R[0-9]+, R[0-9]+, " + // s390x:"CRJ [$](2|4), R[0-9]+, R[0-9]+, " for <-x < <-y { dummy() } - // s390x:"CL?RJ\t[$]8, R[0-9]+, R[0-9]+, " + // s390x:"CL?RJ [$]8, R[0-9]+, R[0-9]+, " for <-x == <-y { dummy() } @@ -98,22 +98,22 @@ func si32(x, y chan int32) { // Signed 32-bit compare-and-branch with 8-bit immediate. 
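(In the s390x patterns above and below, the first immediate is the 4-bit condition-code mask of the fused compare-and-branch instruction. The constants below reflect my reading of the encoding; the names are illustrative and do not come from the toolchain:)

package ccmask

// s390x compare-and-branch condition masks (illustrative names):
const (
	ccEQ = 8           // branch if the operands are equal
	ccLT = 4           // branch if the first operand is low
	ccGT = 2           // branch if the first operand is high
	ccLE = ccEQ | ccLT // 12: why `i < 128` asserts "CGIJ [$]12, ..., [$]127"
	ccGE = ccEQ | ccGT // 10: why `i > -129` asserts "CGIJ [$]10, ..., [$]-128"
	ccNE = ccLT | ccGT // 6: used by the != loops
)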
func si32x8(doNotOptimize int32) { // take in doNotOptimize as an argument to avoid the loops being rewritten to count down - // s390x:"CIJ\t[$]12, R[0-9]+, [$]127, " + // s390x:"CIJ [$]12, R[0-9]+, [$]127, " for i := doNotOptimize; i < 128; i++ { dummy() } - // s390x:"CIJ\t[$]10, R[0-9]+, [$]-128, " + // s390x:"CIJ [$]10, R[0-9]+, [$]-128, " for i := doNotOptimize; i > -129; i-- { dummy() } - // s390x:"CIJ\t[$]2, R[0-9]+, [$]127, " + // s390x:"CIJ [$]2, R[0-9]+, [$]127, " for i := doNotOptimize; i >= 128; i++ { dummy() } - // s390x:"CIJ\t[$]4, R[0-9]+, [$]-128, " + // s390x:"CIJ [$]4, R[0-9]+, [$]-128, " for i := doNotOptimize; i <= -129; i-- { dummy() } @@ -121,12 +121,12 @@ func si32x8(doNotOptimize int32) { // Unsigned 32-bit compare-and-branch. func ui32(x, y chan uint32) { - // s390x:"CLRJ\t[$](2|4), R[0-9]+, R[0-9]+, " + // s390x:"CLRJ [$](2|4), R[0-9]+, R[0-9]+, " for <-x > <-y { dummy() } - // s390x:"CL?RJ\t[$]6, R[0-9]+, R[0-9]+, " + // s390x:"CL?RJ [$]6, R[0-9]+, R[0-9]+, " for <-x != <-y { dummy() } @@ -134,22 +134,22 @@ func ui32(x, y chan uint32) { // Unsigned 32-bit comparison with 8-bit immediate. func ui32x8() { - // s390x:"CLIJ\t[$]4, R[0-9]+, [$]128, " + // s390x:"CLIJ [$]4, R[0-9]+, [$]128, " for i := uint32(0); i < 128; i++ { dummy() } - // s390x:"CLIJ\t[$]12, R[0-9]+, [$]255, " + // s390x:"CLIJ [$]12, R[0-9]+, [$]255, " for i := uint32(0); i < 256; i++ { dummy() } - // s390x:"CLIJ\t[$]2, R[0-9]+, [$]255, " + // s390x:"CLIJ [$]2, R[0-9]+, [$]255, " for i := uint32(257); i >= 256; i-- { dummy() } - // s390x:"CLIJ\t[$]2, R[0-9]+, [$]0, " + // s390x:"CLIJ [$]2, R[0-9]+, [$]0, " for i := uint32(1024); i > 0; i-- { dummy() } @@ -157,12 +157,12 @@ func ui32x8() { // Signed 64-bit comparison with unsigned 8-bit immediate. func si64xu8(x chan int64) { - // s390x:"CLGIJ\t[$]8, R[0-9]+, [$]128, " + // s390x:"CLGIJ [$]8, R[0-9]+, [$]128, " for <-x == 128 { dummy() } - // s390x:"CLGIJ\t[$]6, R[0-9]+, [$]255, " + // s390x:"CLGIJ [$]6, R[0-9]+, [$]255, " for <-x != 255 { dummy() } @@ -170,12 +170,12 @@ func si64xu8(x chan int64) { // Signed 32-bit comparison with unsigned 8-bit immediate. func si32xu8(x chan int32) { - // s390x:"CLIJ\t[$]8, R[0-9]+, [$]255, " + // s390x:"CLIJ [$]8, R[0-9]+, [$]255, " for <-x == 255 { dummy() } - // s390x:"CLIJ\t[$]6, R[0-9]+, [$]128, " + // s390x:"CLIJ [$]6, R[0-9]+, [$]128, " for <-x != 128 { dummy() } @@ -183,12 +183,12 @@ func si32xu8(x chan int32) { // Unsigned 64-bit comparison with signed 8-bit immediate. func ui64xu8(x chan uint64) { - // s390x:"CGIJ\t[$]8, R[0-9]+, [$]-1, " + // s390x:"CGIJ [$]8, R[0-9]+, [$]-1, " for <-x == ^uint64(0) { dummy() } - // s390x:"CGIJ\t[$]6, R[0-9]+, [$]-128, " + // s390x:"CGIJ [$]6, R[0-9]+, [$]-128, " for <-x != ^uint64(127) { dummy() } @@ -196,12 +196,12 @@ func ui64xu8(x chan uint64) { // Unsigned 32-bit comparison with signed 8-bit immediate. 
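(The mixed-signedness groups here work because == and != depend only on the raw bit pattern, so the backend may pick whichever 8-bit immediate form the constant happens to fit: ^uint32(0) fits a signed byte as -1, while 255 fits an unsigned byte. A tiny standalone illustration, not part of the patch:)

package main

import "fmt"

func main() {
	u := ^uint32(0)
	// Same 32 bits viewed both ways: equality holds regardless of the view,
	// so a signed CIJ with immediate -1 can test u == 0xffffffff.
	fmt.Println(u == 0xffffffff, int32(u) == -1) // true true
}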
func ui32xu8(x chan uint32) { - // s390x:"CIJ\t[$]8, R[0-9]+, [$]-128, " + // s390x:"CIJ [$]8, R[0-9]+, [$]-128, " for <-x == ^uint32(127) { dummy() } - // s390x:"CIJ\t[$]6, R[0-9]+, [$]-1, " + // s390x:"CIJ [$]6, R[0-9]+, [$]-1, " for <-x != ^uint32(0) { dummy() } diff --git a/test/codegen/comparisons.go b/test/codegen/comparisons.go index 702ea275cc9..74a7d689da3 100644 --- a/test/codegen/comparisons.go +++ b/test/codegen/comparisons.go @@ -21,68 +21,68 @@ import ( // Check that compare to constant string use 2/4/8 byte compares func CompareString1(s string) bool { - // amd64:`CMPW\t\(.*\), [$]` - // arm64:`MOVHU\t\(.*\), [R]`,`MOVD\t[$]`,`CMPW\tR` - // ppc64le:`MOVHZ\t\(.*\), [R]`,`CMPW\t.*, [$]` - // s390x:`MOVHBR\t\(.*\), [R]`,`CMPW\t.*, [$]` + // amd64:`CMPW \(.*\), [$]` + // arm64:`MOVHU \(.*\), [R]`,`MOVD [$]`,`CMPW R` + // ppc64le:`MOVHZ \(.*\), [R]`,`CMPW .*, [$]` + // s390x:`MOVHBR \(.*\), [R]`,`CMPW .*, [$]` return s == "xx" } func CompareString2(s string) bool { - // amd64:`CMPL\t\(.*\), [$]` - // arm64:`MOVWU\t\(.*\), [R]`,`CMPW\t.*, [R]` - // ppc64le:`MOVWZ\t\(.*\), [R]`,`CMPW\t.*, [R]` - // s390x:`MOVWBR\t\(.*\), [R]`,`CMPW\t.*, [$]` + // amd64:`CMPL \(.*\), [$]` + // arm64:`MOVWU \(.*\), [R]`,`CMPW .*, [R]` + // ppc64le:`MOVWZ \(.*\), [R]`,`CMPW .*, [R]` + // s390x:`MOVWBR \(.*\), [R]`,`CMPW .*, [$]` return s == "xxxx" } func CompareString3(s string) bool { - // amd64:`CMPQ\t\(.*\), [A-Z]` - // arm64:-`CMPW\t` - // ppc64x:-`CMPW\t` - // s390x:-`CMPW\t` + // amd64:`CMPQ \(.*\), [A-Z]` + // arm64:-`CMPW ` + // ppc64x:-`CMPW ` + // s390x:-`CMPW ` return s == "xxxxxxxx" } // Check that arrays compare use 2/4/8 byte compares func CompareArray1(a, b [2]byte) bool { - // amd64:`CMPW\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` - // arm64:-`MOVBU\t` - // ppc64le:-`MOVBZ\t` - // s390x:-`MOVBZ\t` + // amd64:`CMPW command-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` + // arm64:-`MOVBU ` + // ppc64le:-`MOVBZ ` + // s390x:-`MOVBZ ` return a == b } func CompareArray2(a, b [3]uint16) bool { - // amd64:`CMPL\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` - // amd64:`CMPW\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` + // amd64:`CMPL command-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` + // amd64:`CMPW command-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` return a == b } func CompareArray3(a, b [3]int16) bool { - // amd64:`CMPL\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` - // amd64:`CMPW\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` + // amd64:`CMPL command-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` + // amd64:`CMPW command-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` return a == b } func CompareArray4(a, b [12]int8) bool { - // amd64:`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` - // amd64:`CMPL\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` + // amd64:`CMPQ command-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` + // amd64:`CMPL command-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` return a == b } func CompareArray5(a, b [15]byte) bool { - // amd64:`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` + // amd64:`CMPQ command-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` return a == b } // This was a TODO in mapaccess1_faststr func CompareArray6(a, b unsafe.Pointer) bool { - // amd64:`CMPL\t\(.*\), [A-Z]` - // arm64:`MOVWU\t\(.*\), [R]`,`CMPW\t.*, [R]` - // ppc64le:`MOVWZ\t\(.*\), [R]`,`CMPW\t.*, [R]` - // s390x:`MOVWBR\t\(.*\), [R]`,`CMPW\t.*, [R]` + // amd64:`CMPL \(.*\), [A-Z]` + // arm64:`MOVWU \(.*\), [R]`,`CMPW .*, [R]` + // ppc64le:`MOVWZ \(.*\), [R]`,`CMPW .*, [R]` + // 
s390x:`MOVWBR \(.*\), [R]`,`CMPW .*, [R]` return *((*[4]byte)(a)) != *((*[4]byte)(b)) } @@ -93,7 +93,7 @@ type T1 struct { } func CompareStruct1(s1, s2 T1) bool { - // amd64:`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` + // amd64:`CMPQ command-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` // amd64:-`CALL` return s1 == s2 } @@ -103,7 +103,7 @@ type T2 struct { } func CompareStruct2(s1, s2 T2) bool { - // amd64:`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` + // amd64:`CMPQ command-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` // amd64:-`CALL` return s1 == s2 } @@ -116,7 +116,7 @@ type T3 struct { } func CompareStruct3(s1, s2 T3) bool { - // amd64:-`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` + // amd64:-`CMPQ command-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` // amd64:`CALL` return s1 == s2 } @@ -126,7 +126,7 @@ type T4 struct { } func CompareStruct4(s1, s2 T4) bool { - // amd64:-`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` + // amd64:-`CMPQ command-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` // amd64:`CALL` return s1 == s2 } @@ -140,7 +140,7 @@ func CompareStruct4(s1, s2 T4) bool { var r bool func CmpFold(x uint32) { - // amd64:`SETHI\t.*\(SB\)` + // amd64:`SETHI .*\(SB\)` r = x > 4 } @@ -148,27 +148,27 @@ func CmpFold(x uint32) { // possible func CmpMem1(p int, q *int) bool { - // amd64:`CMPQ\t\(.*\), [A-Z]` + // amd64:`CMPQ \(.*\), [A-Z]` return p < *q } func CmpMem2(p *int, q int) bool { - // amd64:`CMPQ\t\(.*\), [A-Z]` + // amd64:`CMPQ \(.*\), [A-Z]` return *p < q } func CmpMem3(p *int) bool { - // amd64:`CMPQ\t\(.*\), [$]7` + // amd64:`CMPQ \(.*\), [$]7` return *p < 7 } func CmpMem4(p *int) bool { - // amd64:`CMPQ\t\(.*\), [$]7` + // amd64:`CMPQ \(.*\), [$]7` return 7 < *p } func CmpMem5(p **int) { - // amd64:`CMPL\truntime.writeBarrier\(SB\), [$]0` + // amd64:`CMPL runtime.writeBarrier\(SB\), [$]0` *p = nil } @@ -270,31 +270,31 @@ func CmpToZero(a, b, d int32, e, f int64, deOptC0, deOptC1 bool) int32 { func CmpLogicalToZero(a, b, c uint32, d, e, f, g uint64) uint64 { - // ppc64x:"ANDCC",-"CMPW" - // wasm:"I64Eqz",-"I32Eqz",-"I64ExtendI32U",-"I32WrapI64" + // ppc64x:"ANDCC" -"CMPW" + // wasm:"I64Eqz" -"I32Eqz" -"I64ExtendI32U" -"I32WrapI64" if a&63 == 0 { return 1 } - // ppc64x:"ANDCC",-"CMP" - // wasm:"I64Eqz",-"I32Eqz",-"I64ExtendI32U",-"I32WrapI64" + // ppc64x:"ANDCC" -"CMP" + // wasm:"I64Eqz" -"I32Eqz" -"I64ExtendI32U" -"I32WrapI64" if d&255 == 0 { return 1 } - // ppc64x:"ANDCC",-"CMP" - // wasm:"I64Eqz",-"I32Eqz",-"I64ExtendI32U",-"I32WrapI64" + // ppc64x:"ANDCC" -"CMP" + // wasm:"I64Eqz" -"I32Eqz" -"I64ExtendI32U" -"I32WrapI64" if d&e == 0 { return 1 } - // ppc64x:"ORCC",-"CMP" - // wasm:"I64Eqz",-"I32Eqz",-"I64ExtendI32U",-"I32WrapI64" + // ppc64x:"ORCC" -"CMP" + // wasm:"I64Eqz" -"I32Eqz" -"I64ExtendI32U" -"I32WrapI64" if f|g == 0 { return 1 } - // ppc64x:"XORCC",-"CMP" - // wasm:"I64Eqz","I32Eqz",-"I64ExtendI32U",-"I32WrapI64" + // ppc64x:"XORCC" -"CMP" + // wasm:"I64Eqz" "I32Eqz" -"I64ExtendI32U" -"I32WrapI64" if e^d == 0 { return 1 } @@ -711,19 +711,19 @@ func cmpToCmn(a, b, c, d int) int { func cmpToCmnLessThan(a, b, c, d int) int { var c1, c2, c3, c4 int - // arm64:`CMN`,`CSET\tMI`,-`CMP` + // arm64:`CMN`,`CSET MI`,-`CMP` if a+1 < 0 { c1 = 1 } - // arm64:`CMN`,`CSET\tMI`,-`CMP` + // arm64:`CMN`,`CSET MI`,-`CMP` if a+b < 0 { c2 = 1 } - // arm64:`CMN`,`CSET\tMI`,-`CMP` + // arm64:`CMN`,`CSET MI`,-`CMP` if a*b+c < 0 { c3 = 1 } - // arm64:`CMP`,`CSET\tMI`,-`CMN` + // arm64:`CMP`,`CSET MI`,-`CMN` if a-b*c < 0 { c4 = 1 } @@ -780,19 +780,19 @@ func 
ge128Signed64(x int64) bool { func cmpToCmnGreaterThanEqual(a, b, c, d int) int { var c1, c2, c3, c4 int - // arm64:`CMN`,`CSET\tPL`,-`CMP` + // arm64:`CMN`,`CSET PL`,-`CMP` if a+1 >= 0 { c1 = 1 } - // arm64:`CMN`,`CSET\tPL`,-`CMP` + // arm64:`CMN`,`CSET PL`,-`CMP` if a+b >= 0 { c2 = 1 } - // arm64:`CMN`,`CSET\tPL`,-`CMP` + // arm64:`CMN`,`CSET PL`,-`CMP` if a*b+c >= 0 { c3 = 1 } - // arm64:`CMP`,`CSET\tPL`,-`CMN` + // arm64:`CMP`,`CSET PL`,-`CMN` if a-b*c >= 0 { c4 = 1 } @@ -870,6 +870,6 @@ func cmpstring2(x, y string) int { // like in cmpstring1 will not work. Instead, we // look for spill/restore instructions, which only // need to exist if there are 2 calls. - //amd64:-`MOVQ\t.*\(SP\)` + //amd64:-`MOVQ .*\(SP\)` return cmp.Compare(x, y) } diff --git a/test/codegen/condmove.go b/test/codegen/condmove.go index 97be0ced75d..54e5f361fea 100644 --- a/test/codegen/condmove.go +++ b/test/codegen/condmove.go @@ -12,8 +12,8 @@ func cmovint(c int) int { x = 182 } // amd64:"CMOVQLT" - // arm64:"CSEL\tLT" - // ppc64x:"ISEL\t[$]0" + // arm64:"CSEL LT" + // ppc64x:"ISEL [$]0" // wasm:"Select" return x } @@ -23,8 +23,8 @@ func cmovchan(x, y chan int) chan int { x = y } // amd64:"CMOVQNE" - // arm64:"CSEL\tNE" - // ppc64x:"ISEL\t[$]2" + // arm64:"CSEL NE" + // ppc64x:"ISEL [$]2" // wasm:"Select" return x } @@ -34,8 +34,8 @@ func cmovuintptr(x, y uintptr) uintptr { x = -y } // amd64:"CMOVQ(HI|CS)" - // arm64:"CSNEG\tLS" - // ppc64x:"ISEL\t[$]1" + // arm64:"CSNEG LS" + // ppc64x:"ISEL [$]1" // wasm:"Select" return x } @@ -45,8 +45,8 @@ func cmov32bit(x, y uint32) uint32 { x = -y } // amd64:"CMOVL(HI|CS)" - // arm64:"CSNEG\t(LS|HS)" - // ppc64x:"ISEL\t[$]1" + // arm64:"CSNEG (LS|HS)" + // ppc64x:"ISEL [$]1" // wasm:"Select" return x } @@ -56,8 +56,8 @@ func cmov16bit(x, y uint16) uint16 { x = -y } // amd64:"CMOVW(HI|CS)" - // arm64:"CSNEG\t(LS|HS)" - // ppc64x:"ISEL\t[$][01]" + // arm64:"CSNEG (LS|HS)" + // ppc64x:"ISEL [$][01]" // wasm:"Select" return x } @@ -69,9 +69,9 @@ func cmovfloateq(x, y float64) int { if x == y { a = 256 } - // amd64:"CMOVQNE","CMOVQPC" - // arm64:"CSEL\tEQ" - // ppc64x:"ISEL\t[$]2" + // amd64:"CMOVQNE" "CMOVQPC" + // arm64:"CSEL EQ" + // ppc64x:"ISEL [$]2" // wasm:"Select" return a } @@ -81,9 +81,9 @@ func cmovfloatne(x, y float64) int { if x != y { a = 256 } - // amd64:"CMOVQNE","CMOVQPS" - // arm64:"CSEL\tNE" - // ppc64x:"ISEL\t[$]2" + // amd64:"CMOVQNE" "CMOVQPS" + // arm64:"CSEL NE" + // ppc64x:"ISEL [$]2" // wasm:"Select" return a } @@ -109,8 +109,8 @@ func cmovfloatint2(x, y float64) float64 { rexp = rexp - 42 } // amd64:"CMOVQHI" - // arm64:"CSEL\tMI" - // ppc64x:"ISEL\t[$]0" + // arm64:"CSEL MI" + // ppc64x:"ISEL [$]0" // wasm:"Select" r = r - ldexp(y, rexp-yexp) } @@ -124,8 +124,8 @@ func cmovloaded(x [4]int, y int) int { y = y >> 2 } // amd64:"CMOVQNE" - // arm64:"CSEL\tNE" - // ppc64x:"ISEL\t[$]2" + // arm64:"CSEL NE" + // ppc64x:"ISEL [$]2" // wasm:"Select" return y } @@ -136,8 +136,8 @@ func cmovuintptr2(x, y uintptr) uintptr { a = 256 } // amd64:"CMOVQEQ" - // arm64:"CSEL\tEQ" - // ppc64x:"ISEL\t[$]2" + // arm64:"CSEL EQ" + // ppc64x:"ISEL [$]2" // wasm:"Select" return a } @@ -230,7 +230,7 @@ func cmovinc(cond bool, a, b, c int) { } else { x0 = b + 1 } - // arm64:"CSINC\tNE", -"CSEL" + // arm64:"CSINC NE", -"CSEL" r0 = x0 if cond { @@ -238,13 +238,13 @@ func cmovinc(cond bool, a, b, c int) { } else { x1 = a } - // arm64:"CSINC\tEQ", -"CSEL" + // arm64:"CSINC EQ", -"CSEL" r1 = x1 if cond { c++ } - // arm64:"CSINC\tEQ", -"CSEL" + // arm64:"CSINC EQ", -"CSEL" r2 = c 
} @@ -256,7 +256,7 @@ func cmovinv(cond bool, a, b int) { } else { x0 = ^b } - // arm64:"CSINV\tNE", -"CSEL" + // arm64:"CSINV NE", -"CSEL" r0 = x0 if cond { @@ -264,7 +264,7 @@ func cmovinv(cond bool, a, b int) { } else { x1 = a } - // arm64:"CSINV\tEQ", -"CSEL" + // arm64:"CSINV EQ", -"CSEL" r1 = x1 } @@ -276,7 +276,7 @@ func cmovneg(cond bool, a, b, c int) { } else { x0 = -b } - // arm64:"CSNEG\tNE", -"CSEL" + // arm64:"CSNEG NE", -"CSEL" r0 = x0 if cond { @@ -284,7 +284,7 @@ func cmovneg(cond bool, a, b, c int) { } else { x1 = a } - // arm64:"CSNEG\tEQ", -"CSEL" + // arm64:"CSNEG EQ", -"CSEL" r1 = x1 } @@ -296,7 +296,7 @@ func cmovsetm(cond bool, x int) { } else { x0 = 0 } - // arm64:"CSETM\tNE", -"CSEL" + // arm64:"CSETM NE", -"CSEL" r0 = x0 if cond { @@ -304,7 +304,7 @@ func cmovsetm(cond bool, x int) { } else { x1 = -1 } - // arm64:"CSETM\tEQ", -"CSEL" + // arm64:"CSETM EQ", -"CSEL" r1 = x1 } @@ -316,7 +316,7 @@ func cmovFcmp0(s, t float64, a, b int) { } else { x0 = b + 1 } - // arm64:"CSINC\tMI", -"CSEL" + // arm64:"CSINC MI", -"CSEL" r0 = x0 if s <= t { @@ -324,7 +324,7 @@ func cmovFcmp0(s, t float64, a, b int) { } else { x1 = ^b } - // arm64:"CSINV\tLS", -"CSEL" + // arm64:"CSINV LS", -"CSEL" r1 = x1 if s > t { @@ -332,7 +332,7 @@ func cmovFcmp0(s, t float64, a, b int) { } else { x2 = -b } - // arm64:"CSNEG\tMI", -"CSEL" + // arm64:"CSNEG MI", -"CSEL" r2 = x2 if s >= t { @@ -340,7 +340,7 @@ func cmovFcmp0(s, t float64, a, b int) { } else { x3 = 0 } - // arm64:"CSETM\tLS", -"CSEL" + // arm64:"CSETM LS", -"CSEL" r3 = x3 if s == t { @@ -348,7 +348,7 @@ func cmovFcmp0(s, t float64, a, b int) { } else { x4 = b + 1 } - // arm64:"CSINC\tEQ", -"CSEL" + // arm64:"CSINC EQ", -"CSEL" r4 = x4 if s != t { @@ -356,7 +356,7 @@ func cmovFcmp0(s, t float64, a, b int) { } else { x5 = b + 1 } - // arm64:"CSINC\tNE", -"CSEL" + // arm64:"CSINC NE", -"CSEL" r5 = x5 } @@ -368,7 +368,7 @@ func cmovFcmp1(s, t float64, a, b int) { } else { x0 = a } - // arm64:"CSINC\tPL", -"CSEL" + // arm64:"CSINC PL", -"CSEL" r0 = x0 if s <= t { @@ -376,7 +376,7 @@ func cmovFcmp1(s, t float64, a, b int) { } else { x1 = a } - // arm64:"CSINV\tHI", -"CSEL" + // arm64:"CSINV HI", -"CSEL" r1 = x1 if s > t { @@ -384,7 +384,7 @@ func cmovFcmp1(s, t float64, a, b int) { } else { x2 = a } - // arm64:"CSNEG\tPL", -"CSEL" + // arm64:"CSNEG PL", -"CSEL" r2 = x2 if s >= t { @@ -392,7 +392,7 @@ func cmovFcmp1(s, t float64, a, b int) { } else { x3 = -1 } - // arm64:"CSETM\tHI", -"CSEL" + // arm64:"CSETM HI", -"CSEL" r3 = x3 if s == t { @@ -400,7 +400,7 @@ func cmovFcmp1(s, t float64, a, b int) { } else { x4 = a } - // arm64:"CSINC\tNE", -"CSEL" + // arm64:"CSINC NE", -"CSEL" r4 = x4 if s != t { @@ -408,7 +408,7 @@ func cmovFcmp1(s, t float64, a, b int) { } else { x5 = a } - // arm64:"CSINC\tEQ", -"CSEL" + // arm64:"CSINC EQ", -"CSEL" r5 = x5 } @@ -439,7 +439,7 @@ func cmovzeroreg0(a, b int) int { if a == b { x = a } - // ppc64x:"ISEL\t[$]2, R[0-9]+, R0, R[0-9]+" + // ppc64x:"ISEL [$]2, R[0-9]+, R0, R[0-9]+" return x } @@ -448,7 +448,7 @@ func cmovzeroreg1(a, b int) int { if a == b { x = 0 } - // ppc64x:"ISEL\t[$]2, R0, R[0-9]+, R[0-9]+" + // ppc64x:"ISEL [$]2, R0, R[0-9]+, R[0-9]+" return x } @@ -507,3 +507,23 @@ func cmovmathhalveu(a uint, b bool) uint { // wasm:"I64ShrU", -"Select" return a } + +func branchlessBoolToUint8(b bool) (r uint8) { + if b { + r = 1 + } + return +} + +func cmovFromMulFromFlags64(x uint64, b bool) uint64 { + // amd64:-"MOVB.ZX" + r := uint64(branchlessBoolToUint8(b)) + // amd64:"CMOV",-"MOVB.ZX",-"MUL" + 
return x * r +} +func cmovFromMulFromFlags64sext(x int64, b bool) int64 { + // amd64:-"MOVB.ZX" + r := int64(int8(branchlessBoolToUint8(b))) + // amd64:"CMOV",-"MOVB.ZX",-"MUL" + return x * r +} diff --git a/test/codegen/constants.go b/test/codegen/constants.go index 3ce17d0ad3a..178a106552a 100644 --- a/test/codegen/constants.go +++ b/test/codegen/constants.go @@ -7,27 +7,29 @@ package codegen // A uint16 or sint16 constant shifted left. -func shifted16BitConstants(out [64]uint64) { - // ppc64x: "MOVD\t[$]8193,", "SLD\t[$]27," +func shifted16BitConstants() (out [64]uint64) { + // ppc64x: "MOVD [$]8193,", "SLD [$]27," out[0] = 0x0000010008000000 - // ppc64x: "MOVD\t[$]-32767", "SLD\t[$]26," + // ppc64x: "MOVD [$]-32767", "SLD [$]26," out[1] = 0xFFFFFE0004000000 - // ppc64x: "MOVD\t[$]-1", "SLD\t[$]48," + // ppc64x: "MOVD [$]-1", "SLD [$]48," out[2] = 0xFFFF000000000000 - // ppc64x: "MOVD\t[$]65535", "SLD\t[$]44," + // ppc64x: "MOVD [$]65535", "SLD [$]44," out[3] = 0x0FFFF00000000000 + return } // A contiguous set of 1 bits, potentially wrapping. -func contiguousMaskConstants(out [64]uint64) { - // ppc64x: "MOVD\t[$]-1", "RLDC\tR[0-9]+, [$]44, [$]63," +func contiguousMaskConstants() (out [64]uint64) { + // ppc64x: "MOVD [$]-1", "RLDC R[0-9]+, [$]44, [$]63," out[0] = 0xFFFFF00000000001 - // ppc64x: "MOVD\t[$]-1", "RLDC\tR[0-9]+, [$]43, [$]63," + // ppc64x: "MOVD [$]-1", "RLDC R[0-9]+, [$]43, [$]63," out[1] = 0xFFFFF80000000001 - // ppc64x: "MOVD\t[$]-1", "RLDC\tR[0-9]+, [$]43, [$]4," + // ppc64x: "MOVD [$]-1", "RLDC R[0-9]+, [$]43, [$]4," out[2] = 0x0FFFF80000000000 - // ppc64x/power8: "MOVD\t[$]-1", "RLDC\tR[0-9]+, [$]33, [$]63," - // ppc64x/power9: "MOVD\t[$]-1", "RLDC\tR[0-9]+, [$]33, [$]63," - // ppc64x/power10: "MOVD\t[$]-8589934591," + // ppc64x/power8: "MOVD [$]-1", "RLDC R[0-9]+, [$]33, [$]63," + // ppc64x/power9: "MOVD [$]-1", "RLDC R[0-9]+, [$]33, [$]63," + // ppc64x/power10: "MOVD [$]-8589934591," out[3] = 0xFFFFFFFE00000001 + return } diff --git a/test/codegen/copy.go b/test/codegen/copy.go index 4329c6d78f3..a4b9bedbe82 100644 --- a/test/codegen/copy.go +++ b/test/codegen/copy.go @@ -43,8 +43,8 @@ var x [256]byte func moveDisjointStack32() { var s [32]byte // ppc64x:-".*memmove" - // ppc64x/power8:"LXVD2X",-"ADD",-"BC" - // ppc64x/power9:"LXV",-"LXVD2X",-"ADD",-"BC" + // ppc64x/power8:"LXVD2X" -"ADD" -"BC" + // ppc64x/power9:"LXV" -"LXVD2X" -"ADD" -"BC" copy(s[:], x[:32]) runtime.KeepAlive(&s) } @@ -52,8 +52,8 @@ func moveDisjointStack32() { func moveDisjointStack64() { var s [96]byte // ppc64x:-".*memmove" - // ppc64x/power8:"LXVD2X","ADD","BC" - // ppc64x/power9:"LXV",-"LXVD2X",-"ADD",-"BC" + // ppc64x/power8:"LXVD2X" "ADD" "BC" + // ppc64x/power9:"LXV" -"LXVD2X" -"ADD" -"BC" copy(s[:], x[:96]) runtime.KeepAlive(&s) } @@ -64,7 +64,7 @@ func moveDisjointStack() { // amd64:-".*memmove" // ppc64x:-".*memmove" // ppc64x/power8:"LXVD2X" - // ppc64x/power9:"LXV",-"LXVD2X" + // ppc64x/power9:"LXV" -"LXVD2X" copy(s[:], x[:]) runtime.KeepAlive(&s) } @@ -75,7 +75,7 @@ func moveDisjointArg(b *[256]byte) { // amd64:-".*memmove" // ppc64x:-".*memmove" // ppc64x/power8:"LXVD2X" - // ppc64x/power9:"LXV",-"LXVD2X" + // ppc64x/power9:"LXV" -"LXVD2X" copy(s[:], b[:]) runtime.KeepAlive(&s) } @@ -85,7 +85,7 @@ func moveDisjointNoOverlap(a *[256]byte) { // amd64:-".*memmove" // ppc64x:-".*memmove" // ppc64x/power8:"LXVD2X" - // ppc64x/power9:"LXV",-"LXVD2X" + // ppc64x/power9:"LXV" -"LXVD2X" copy(a[:], a[128:]) } @@ -135,28 +135,28 @@ func moveArchLowering16(b []byte, x *[16]byte) { // Check that 
no branches are generated when the pointers are [not] equal. func ptrEqual() { - // amd64:-"JEQ",-"JNE" - // ppc64x:-"BEQ",-"BNE" - // s390x:-"BEQ",-"BNE" + // amd64:-"JEQ" -"JNE" + // ppc64x:-"BEQ" -"BNE" + // s390x:-"BEQ" -"BNE" copy(x[:], x[:]) } func ptrOneOffset() { - // amd64:-"JEQ",-"JNE" - // ppc64x:-"BEQ",-"BNE" - // s390x:-"BEQ",-"BNE" + // amd64:-"JEQ" -"JNE" + // ppc64x:-"BEQ" -"BNE" + // s390x:-"BEQ" -"BNE" copy(x[1:], x[:]) } func ptrBothOffset() { - // amd64:-"JEQ",-"JNE" - // ppc64x:-"BEQ",-"BNE" - // s390x:-"BEQ",-"BNE" + // amd64:-"JEQ" -"JNE" + // ppc64x:-"BEQ" -"BNE" + // s390x:-"BEQ" -"BNE" copy(x[1:], x[2:]) } // Verify #62698 on PPC64. func noMaskOnCopy(a []int, s string, x int) int { - // ppc64x:-"MOVD\t$-1", -"AND" + // ppc64x:-"MOVD [$]-1", -"AND" return a[x&^copy([]byte{}, s)] } diff --git a/test/codegen/divmod.go b/test/codegen/divmod.go new file mode 100644 index 00000000000..9de091af7a0 --- /dev/null +++ b/test/codegen/divmod.go @@ -0,0 +1,1200 @@ +// asmcheck + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package codegen + +// Div and mod rewrites, testing cmd/compile/internal/ssa/_gen/divmod.rules. +// See comments there for "Case 1" etc. + +// Convert multiplication by a power of two to a shift. + +func mul32_uint8(i uint8) uint8 { + // 386: "SHLL [$]5," + // arm64: "LSL [$]5," + return i * 32 +} + +func mul32_uint16(i uint16) uint16 { + // 386: "SHLL [$]5," + // arm64: "LSL [$]5," + return i * 32 +} + +func mul32_uint32(i uint32) uint32 { + // 386: "SHLL [$]5," + // arm64: "LSL [$]5," + return i * 32 +} + +func mul32_uint64(i uint64) uint64 { + // 386: "SHLL [$]5," + // 386: "SHRL [$]27," + // arm64: "LSL [$]5," + return i * 32 +} + +func mulNeg32_int8(i int8) int8 { + // 386: "SHLL [$]5," + // 386: "NEGL" + // arm64: "NEG R[0-9]+<<5," + return i * -32 +} + +func mulNeg32_int16(i int16) int16 { + // 386: "SHLL [$]5," + // 386: "NEGL" + // arm64: "NEG R[0-9]+<<5," + return i * -32 +} + +func mulNeg32_int32(i int32) int32 { + // 386: "SHLL [$]5," + // 386: "NEGL" + // arm64: "NEG R[0-9]+<<5," + return i * -32 +} + +func mulNeg32_int64(i int64) int64 { + // 386: "SHLL [$]5," + // 386: "SHRL [$]27," + // 386: "SBBL" + // arm64: "NEG R[0-9]+<<5," + return i * -32 +} + +// Signed divide by power of 2. + +func div32_int8(i int8) int8 { + // 386: "SARB [$]7," + // 386: "SHRB [$]3," + // 386: "ADDL" + // 386: "SARB [$]5," + // arm64: "SBFX [$]7, R[0-9]+, [$]1," + // arm64: "ADD R[0-9]+>>3," + // arm64: "SBFX [$]5, R[0-9]+, [$]3," + return i / 32 +} + +func div32_int16(i int16) int16 { + // 386: "SARW [$]15," + // 386: "SHRW [$]11," + // 386: "ADDL" + // 386: "SARW [$]5," + // arm64: "SBFX [$]15, R[0-9]+, [$]1," + // arm64: "ADD R[0-9]+>>11," + // arm64: "SBFX [$]5, R[0-9]+, [$]11," + return i / 32 +} + +func div32_int32(i int32) int32 { + // 386: "SARL [$]31," + // 386: "SHRL [$]27," + // 386: "ADDL" + // 386: "SARL [$]5," + // arm64: "SBFX [$]31, R[0-9]+, [$]1," + // arm64: "ADD R[0-9]+>>27," + // arm64: "SBFX [$]5, R[0-9]+, [$]27," + return i / 32 +} + +func div32_int64(i int64) int64 { + // 386: "SARL [$]31," + // 386: "SHRL [$]27," + // 386: "ADDL" + // 386: "SARL [$]5," + // 386: "SHRL [$]5," + // 386: "SHLL [$]27," + // arm64: "ASR [$]63," + // arm64: "ADD R[0-9]+>>59," + // arm64: "ASR [$]5," + return i / 32 +} + +// Case 1. Signed divides where 2N ≤ register size. 
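As a reading aid for the Case 1 checks that follow, here is a minimal Go model of the rewrite under test (an illustrative sketch; the function name is invented and this code is not part of the patch). Division by 7 becomes a widening multiply by the magic constant 147 = ceil(2^10/7), an arithmetic shift by 10, and a sign correction, which is exactly the SARL/IMUL3L/SUBL (386) and MULW/SBFX/SUB (arm64) sequence matched below.

func div7Model(i int8) int8 {
	// Widening multiply by ceil(2^10/7) = 147, then arithmetic shift by 10.
	q := (int32(i) * 147) >> 10
	// i>>7 is -1 for negative i and 0 otherwise; subtracting it rounds the
	// quotient toward zero, matching Go's truncated division.
	q -= int32(i) >> 7
	return int8(q) // equals i / 7 for every int8 value
}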
+ +func div7_int8(i int8) int8 { + // 386: "SARL [$]31," + // 386: "IMUL3L [$]147," + // 386: "SARL [$]10," + // 386: "SUBL" + // arm64: "MOVD [$]147," + // arm64: "MULW" + // arm64: "SBFX [$]10, R[0-9]+, [$]22," + // arm64: "SUB R[0-9]+->31," + // wasm: "I64Const [$]147" + return i / 7 +} + +func div7_int16(i int16) int16 { + // 386: "SARL [$]31," + // 386: "IMUL3L [$]37450," + // 386: "SARL [$]18," + // 386: "SUBL" + // arm64: "MOVD [$]37450," + // arm64: "MULW" + // arm64: "SBFX [$]18, R[0-9]+, [$]14," + // arm64: "SUB R[0-9]+->31," + // wasm: "I64Const [$]37450" + return i / 7 +} + +func div7_int32(i int32) int32 { + // 64-bit only + // arm64: "MOVD [$]2454267027," + // arm64: "MUL " + // arm64: "ASR [$]34," + // arm64: "SUB R[0-9]+->63," + // wasm: "I64Const [$]2454267027" + return i / 7 +} + +// Case 2. Signed divides where m is even. + +func div9_int32(i int32) int32 { + // 386: "SARL [$]31," + // 386: "MOVL [$]1908874354," + // 386: "IMULL" + // 386: "SARL [$]2," + // 386: "SUBL" + // arm64: "MOVD [$]3817748708," + // arm64: "MUL " + // arm64: "ASR [$]35," + // arm64: "SUB R[0-9]+->63," + // wasm: "I64Const [$]3817748708" + return i / 9 +} + +func div7_int64(i int64) int64 { + // 64-bit only + // arm64 MOVD $5270498306774157605, SMULH, ASR $1, SUB ->63 + // arm64: "MOVD [$]5270498306774157605," + // arm64: "SMULH" + // arm64: "ASR [$]1," + // arm64: "SUB R[0-9]+->63," + // wasm: "I64Const [$]613566757" + // wasm: "I64Const [$]1227133513" + return i / 7 +} + +// Case 3. Signed divides where m is odd. + +func div3_int32(i int32) int32 { + // 386: "SARL [$]31," + // 386: "MOVL [$]-1431655765," + // 386: "IMULL" + // 386: "SARL [$]1," + // 386: "SUBL" + // arm64: "MOVD [$]2863311531," + // arm64: "MUL" + // arm64: "ASR [$]33," + // arm64: "SUB R[0-9]+->63," + // wasm: "I64Const [$]2863311531" + return i / 3 +} + +func div3_int64(i int64) int64 { + // 64-bit only + // arm64: "MOVD [$]-6148914691236517205," + // arm64: "SMULH" + // arm64: "ADD" + // arm64: "ASR [$]1," + // arm64: "SUB R[0-9]+->63," + // wasm: "I64Const [$]-1431655766" + // wasm: "I64Const [$]2863311531" + return i / 3 +} + +// Case 4. Unsigned divide where x < 1<<(N-1). + +func div7_int16u(i int16) int16 { + if i < 0 { + return 0 + } + // 386: "IMUL3L [$]37450," + // 386: "SHRL [$]18," + // 386: -"SUBL" + // arm64: "MOVD [$]37450," + // arm64: "MULW" + // arm64: "UBFX [$]18, R[0-9]+, [$]14," + // arm64: -"SUB" + // wasm: "I64Const [$]37450" + // wasm: -"I64Sub" + return i / 7 +} + +func div7_int32u(i int32) int32 { + if i < 0 { + return 0 + } + // 386: "MOVL [$]-1840700269," + // 386: "MULL" + // 386: "SHRL [$]2" + // 386: -"SUBL" + // arm64: "MOVD [$]2454267027," + // arm64: "MUL" + // arm64: "LSR [$]34," + // arm64: -"SUB" + // wasm: "I64Const [$]2454267027" + // wasm: -"I64Sub" + return i / 7 +} + +func div7_int64u(i int64) int64 { + // 64-bit only + if i < 0 { + return 0 + } + // arm64: "MOVD [$]-7905747460161236406," + // arm64: "UMULH" + // arm64: "LSR [$]2," + // arm64: -"SUB" + // wasm: "I64Const [$]1227133514" + // wasm: "I64Const [$]2454267026" + // wasm: -"I64Sub" + return i / 7 +} + +// Case 5. Unsigned divide where 2N+1 ≤ register size. 
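The unsigned variant needs no sign fixup. A sketch of what the div7_uint8 checks below expect (illustrative only, not part of the patch): since the product needs at most 2N+1 = 17 bits, a 32-bit register holds it, and i/7 reduces to one multiply and one shift.

func div7UnsignedModel(i uint8) uint8 {
	// Multiply by ceil(2^11/7) = 293; the 17-bit product fits in 32 bits,
	// so no high-half multiply (UMULH) and no subtract correction is needed.
	return uint8((uint32(i) * 293) >> 11) // equals i / 7 for every uint8 value
}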
+ +func div7_uint8(i uint8) uint8 { + // 386: "IMUL3L [$]293," + // 386: "SHRL [$]11," + // arm64: "MOVD [$]293," + // arm64: "MULW" + // arm64: "UBFX [$]11, R[0-9]+, [$]21," + // wasm: "I64Const [$]293" + return i / 7 +} + +func div7_uint16(i uint16) uint16 { + // only 64-bit + // arm64: "MOVD [$]74899," + // arm64: "MUL" + // arm64: "LSR [$]19," + // wasm: "I64Const [$]74899" + return i / 7 +} + +// Case 6. Unsigned divide where m is even. + +func div3_uint16(i uint16) uint16 { + // 386: "IMUL3L [$]43691," "SHRL [$]17," + // arm64: "MOVD [$]87382," + // arm64: "MUL" + // arm64: "LSR [$]18," + // wasm: "I64Const [$]87382" + return i / 3 +} + +func div3_uint32(i uint32) uint32 { + // 386: "MOVL [$]-1431655765," "MULL" "SHRL [$]1," + // arm64: "MOVD [$]2863311531," + // arm64: "MUL" + // arm64: "LSR [$]33," + // wasm: "I64Const [$]2863311531" + return i / 3 +} + +func div3_uint64(i uint64) uint64 { + // 386: "MOVL [$]-1431655766" + // 386: "MULL" + // 386: "SHRL [$]1" + // 386: -".*CALL" + // arm64: "MOVD [$]-6148914691236517205," + // arm64: "UMULH" + // arm64: "LSR [$]1," + // wasm: "I64Const [$]2863311530" + // wasm: "I64Const [$]2863311531" + return i / 3 +} + +// Case 7. Unsigned divide where c is even. + +func div14_uint16(i uint16) uint16 { + // 32-bit only + // 386: "SHRL [$]1," + // 386: "IMUL3L [$]37450," + // 386: "SHRL [$]18," + return i / 14 +} + +func div14_uint32(i uint32) uint32 { + // 386: "SHRL [$]1," + // 386: "MOVL [$]-1840700269," + // 386: "SHRL [$]2," + // arm64: "UBFX [$]1, R[0-9]+, [$]31," + // arm64: "MOVD [$]2454267027," + // arm64: "MUL" + // arm64: "LSR [$]34," + // wasm: "I64Const [$]2454267027" + return i / 14 +} + +func div14_uint64(i uint64) uint64 { + // 386: "MOVL [$]-1840700270," + // 386: "MULL" + // 386: "SHRL [$]2," + // 386: -".*CALL" + // arm64: "MOVD [$]-7905747460161236406," + // arm64: "UMULH" + // arm64: "LSR [$]2," + // wasm: "I64Const [$]1227133514" + // wasm: "I64Const [$]2454267026" + return i / 14 +} + +// Case 8. Unsigned divide on systems with avg. + +func div7_uint16a(i uint16) uint16 { + // only 32-bit + // 386: "SHLL [$]16," + // 386: "IMUL3L [$]9363," + // 386: "ADDL" + // 386: "RCRL [$]1," + // 386: "SHRL [$]18," + return i / 7 +} + +func div7_uint32(i uint32) uint32 { + // 386: "MOVL [$]613566757," + // 386: "MULL" + // 386: "ADDL" + // 386: "RCRL [$]1," + // 386: "SHRL [$]2," + // arm64: "UBFIZ [$]32, R[0-9]+, [$]32," + // arm64: "MOVD [$]613566757," + // arm64: "MUL" + // arm64: "SUB" + // arm64: "ADD R[0-9]+>>1," + // arm64: "LSR [$]34," + // wasm: "I64Const [$]613566757" + return i / 7 +} + +func div7_uint64(i uint64) uint64 { + // 386: "MOVL [$]-1840700269," + // 386: "MULL" + // 386: "SHRL [$]2," + // 386: -".*CALL" + // arm64: "MOVD [$]2635249153387078803," + // arm64: "UMULH" + // arm64: "SUB" + // arm64: "ADD R[0-9]+>>1," + // arm64: "LSR [$]2," + // wasm: "I64Const [$]613566756" + // wasm: "I64Const [$]2454267027" + return i / 7 +} + +func div12345_uint64(i uint64) uint64 { + // 386: "MOVL [$]-1444876402," + // 386: "MOVL [$]835683390," + // 386: "MULL" + // 386: "SHRL [$]13," + // 386: "SHLL [$]19," + // arm64: "MOVD [$]-6205696892516465602," + // arm64: "UMULH" + // arm64: "LSR [$]13," + // wasm: "I64Const [$]835683390" + // wasm: "I64Const [$]2850090894" + return i / 12345 +} + +// Divisibility and non-divisibility by power of two. 
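The next block checks that testing divisibility by a power of two never computes a remainder. The equivalence being relied on, as a sketch (illustrative only, not part of the patch):

func divisibleBy32(i int64) bool {
	// i%32 == 0 exactly when the low five bits of i are zero, for negative i
	// as well, so the compiler can emit a mask-and-test (TEST/TST $31)
	// instead of a division.
	return i&31 == 0
}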
+ +func divis32_uint8(i uint8) bool { + // 386: "TESTB [$]31," + // arm64: "TSTW [$]31," + return i%32 == 0 +} + +func ndivis32_uint8(i uint8) bool { + // 386: "TESTB [$]31," + // arm64: "TSTW [$]31," + return i%32 != 0 +} + +func divis32_uint16(i uint16) bool { + // 386: "TESTW [$]31," + // arm64: "TSTW [$]31," + return i%32 == 0 +} + +func ndivis32_uint16(i uint16) bool { + // 386: "TESTW [$]31," + // arm64: "TSTW [$]31," + return i%32 != 0 +} + +func divis32_uint32(i uint32) bool { + // 386: "TESTL [$]31," + // arm64: "TSTW [$]31," + return i%32 == 0 +} + +func ndivis32_uint32(i uint32) bool { + // 386: "TESTL [$]31," + // arm64: "TSTW [$]31," + return i%32 != 0 +} + +func divis32_uint64(i uint64) bool { + // 386: "TESTL [$]31," + // arm64: "TST [$]31," + return i%32 == 0 +} + +func ndivis32_uint64(i uint64) bool { + // 386: "TESTL [$]31," + // arm64: "TST [$]31," + return i%32 != 0 +} + +func divis32_int8(i int8) bool { + // 386: "TESTB [$]31," + // arm64: "TSTW [$]31," + return i%32 == 0 +} + +func ndivis32_int8(i int8) bool { + // 386: "TESTB [$]31," + // arm64: "TSTW [$]31," + return i%32 != 0 +} + +func divis32_int16(i int16) bool { + // 386: "TESTW [$]31," + // arm64: "TSTW [$]31," + return i%32 == 0 +} + +func ndivis32_int16(i int16) bool { + // 386: "TESTW [$]31," + // arm64: "TSTW [$]31," + return i%32 != 0 +} + +func divis32_int32(i int32) bool { + // 386: "TESTL [$]31," + // arm64: "TSTW [$]31," + return i%32 == 0 +} + +func ndivis32_int32(i int32) bool { + // 386: "TESTL [$]31," + // arm64: "TSTW [$]31," + return i%32 != 0 +} + +func divis32_int64(i int64) bool { + // 386: "TESTL [$]31," + // arm64: "TST [$]31," + return i%32 == 0 +} + +func ndivis32_int64(i int64) bool { + // 386: "TESTL [$]31," + // arm64: "TST [$]31," + return i%32 != 0 +} + +// Divide with divisibility check; reuse divide intermediate mod. 
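When a function returns both the quotient and the divisibility flag, the two computations share their input and neither requires a second division: the quotient is a single shift and the flag is a mask-and-test of the original value. A sketch of the shape these tests pin down (illustrative only):

func divAndCheck32(i uint32) (uint32, bool) {
	q := i >> 5     // i / 32 as one logical shift
	ok := i&31 == 0 // i % 32 == 0 as one mask-and-test of the same input
	return q, ok
}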
+ +func div_divis32_uint8(i uint8) (uint8, bool) { + // 386: "SHRB [$]5," + // 386: "TESTB [$]31,", + // 386: "SETEQ" + // arm64: "UBFX [$]5, R[0-9]+, [$]3" + // arm64: "TSTW [$]31," + // arm64: "CSET EQ" + return i / 32, i%32 == 0 +} + +func div_ndivis32_uint8(i uint8) (uint8, bool) { + // 386: "SHRB [$]5," + // 386: "TESTB [$]31,", + // 386: "SETNE" + // arm64: "UBFX [$]5, R[0-9]+, [$]3" + // arm64: "TSTW [$]31," + // arm64: "CSET NE" + return i / 32, i%32 != 0 +} + +func div_divis32_uint16(i uint16) (uint16, bool) { + // 386: "SHRW [$]5," + // 386: "TESTW [$]31,", + // 386: "SETEQ" + // arm64: "UBFX [$]5, R[0-9]+, [$]11" + // arm64: "TSTW [$]31," + // arm64: "CSET EQ" + return i / 32, i%32 == 0 +} + +func div_ndivis32_uint16(i uint16) (uint16, bool) { + // 386: "SHRW [$]5," + // 386: "TESTW [$]31,", + // 386: "SETNE" + // arm64: "UBFX [$]5, R[0-9]+, [$]11," + // arm64: "TSTW [$]31," + // arm64: "CSET NE" + return i / 32, i%32 != 0 +} + +func div_divis32_uint32(i uint32) (uint32, bool) { + // 386: "SHRL [$]5," + // 386: "TESTL [$]31,", + // 386: "SETEQ" + // arm64: "UBFX [$]5, R[0-9]+, [$]27," + // arm64: "TSTW [$]31," + // arm64: "CSET EQ" + return i / 32, i%32 == 0 +} + +func div_ndivis32_uint32(i uint32) (uint32, bool) { + // 386: "SHRL [$]5," + // 386: "TESTL [$]31,", + // 386: "SETNE" + // arm64: "UBFX [$]5, R[0-9]+, [$]27," + // arm64: "TSTW [$]31," + // arm64: "CSET NE" + return i / 32, i%32 != 0 +} + +func div_divis32_uint64(i uint64) (uint64, bool) { + // 386: "SHRL [$]5," + // 386: "SHLL [$]27," + // 386: "TESTL [$]31,", + // 386: "SETEQ" + // arm64: "LSR [$]5," + // arm64: "TST [$]31," + // arm64: "CSET EQ" + return i / 32, i%32 == 0 +} + +func div_ndivis32_uint64(i uint64) (uint64, bool) { + // 386: "SHRL [$]5," + // 386: "SHLL [$]27," + // 386: "TESTL [$]31,", + // 386: "SETNE" + // arm64: "LSR [$]5," + // arm64: "TST [$]31," + // arm64: "CSET NE" + return i / 32, i%32 != 0 +} + +func div_divis32_int8(i int8) (int8, bool) { + // 386: "SARB [$]7," + // 386: "SHRB [$]3," + // 386: "SARB [$]5," + // 386: "TESTB [$]31,", + // 386: "SETEQ" + // arm64: "SBFX [$]7, R[0-9]+, [$]1," + // arm64: "ADD R[0-9]+>>3," + // arm64: "SBFX [$]5, R[0-9]+, [$]3," + // arm64: "TSTW [$]31," + // arm64: "CSET EQ" + return i / 32, i%32 == 0 +} + +func div_ndivis32_int8(i int8) (int8, bool) { + // 386: "SARB [$]7," + // 386: "SHRB [$]3," + // 386: "SARB [$]5," + // 386: "TESTB [$]31,", + // 386: "SETNE" + // arm64: "SBFX [$]7, R[0-9]+, [$]1," + // arm64: "ADD R[0-9]+>>3," + // arm64: "SBFX [$]5, R[0-9]+, [$]3," + // arm64: "TSTW [$]31," + // arm64: "CSET NE" + return i / 32, i%32 != 0 +} + +func div_divis32_int16(i int16) (int16, bool) { + // 386: "SARW [$]15," + // 386: "SHRW [$]11," + // 386: "SARW [$]5," + // 386: "TESTW [$]31,", + // 386: "SETEQ" + // arm64: "SBFX [$]15, R[0-9]+, [$]1," + // arm64: "ADD R[0-9]+>>11," + // arm64: "SBFX [$]5, R[0-9]+, [$]11," + // arm64: "TSTW [$]31," + // arm64: "CSET EQ" + return i / 32, i%32 == 0 +} + +func div_ndivis32_int16(i int16) (int16, bool) { + // 386: "SARW [$]15," + // 386: "SHRW [$]11," + // 386: "SARW [$]5," + // 386: "TESTW [$]31,", + // 386: "SETNE" + // arm64: "SBFX [$]15, R[0-9]+, [$]1," + // arm64: "ADD R[0-9]+>>11," + // arm64: "SBFX [$]5, R[0-9]+, [$]11," + // arm64: "TSTW [$]31," + // arm64: "CSET NE" + return i / 32, i%32 != 0 +} + +func div_divis32_int32(i int32) (int32, bool) { + // 386: "SARL [$]31," + // 386: "SHRL [$]27," + // 386: "SARL [$]5," + // 386: "TESTL [$]31,", + // 386: "SETEQ" + // arm64: "SBFX [$]31, R[0-9]+, [$]1," + // 
arm64: "ADD R[0-9]+>>27," + // arm64: "SBFX [$]5, R[0-9]+, [$]27," + // arm64: "TSTW [$]31," + // arm64: "CSET EQ" + return i / 32, i%32 == 0 +} + +func div_ndivis32_int32(i int32) (int32, bool) { + // 386: "SARL [$]31," + // 386: "SHRL [$]27," + // 386: "SARL [$]5," + // 386: "TESTL [$]31,", + // 386: "SETNE" + // arm64: "SBFX [$]31, R[0-9]+, [$]1," + // arm64: "ADD R[0-9]+>>27," + // arm64: "SBFX [$]5, R[0-9]+, [$]27," + // arm64: "TSTW [$]31," + // arm64: "CSET NE" + return i / 32, i%32 != 0 +} + +func div_divis32_int64(i int64) (int64, bool) { + // 386: "SARL [$]31," + // 386: "SHRL [$]27," + // 386: "SARL [$]5," + // 386: "SHLL [$]27," + // 386: "TESTL [$]31,", + // 386: "SETEQ" + // arm64: "ASR [$]63," + // arm64: "ADD R[0-9]+>>59," + // arm64: "ASR [$]5," + // arm64: "TST [$]31," + // arm64: "CSET EQ" + return i / 32, i%32 == 0 +} + +func div_ndivis32_int64(i int64) (int64, bool) { + // 386: "SARL [$]31," + // 386: "SHRL [$]27," + // 386: "SARL [$]5," + // 386: "SHLL [$]27," + // 386: "TESTL [$]31,", + // 386: "SETNE" + // arm64: "ASR [$]63," + // arm64: "ADD R[0-9]+>>59," + // arm64: "ASR [$]5," + // arm64: "TST [$]31," + // arm64: "CSET NE" + return i / 32, i%32 != 0 +} + +// Divisibility and non-divisibility by non-power-of-two. + +func divis6_uint8(i uint8) bool { + // 386: "IMUL3L [$]-85," + // 386: "ROLB [$]7," + // 386: "CMPB .*, [$]42" + // 386: "SETLS" + // arm64: "MOVD [$]-85," + // arm64: "MULW" + // arm64: "UBFX [$]1, R[0-9]+, [$]7," + // arm64: "ORR R[0-9]+<<7" + // arm64: "CMPW [$]42," + // arm64: "CSET LS" + return i%6 == 0 +} + +func ndivis6_uint8(i uint8) bool { + // 386: "IMUL3L [$]-85," + // 386: "ROLB [$]7," + // 386: "CMPB .*, [$]42" + // 386: "SETHI" + // arm64: "MOVD [$]-85," + // arm64: "MULW" + // arm64: "UBFX [$]1, R[0-9]+, [$]7," + // arm64: "ORR R[0-9]+<<7" + // arm64: "CMPW [$]42," + // arm64: "CSET HI" + return i%6 != 0 +} + +func divis6_uint16(i uint16) bool { + // 386: "IMUL3L [$]-21845," + // 386: "ROLW [$]15," + // 386: "CMPW .*, [$]10922" + // 386: "SETLS" + // arm64: "MOVD [$]-21845," + // arm64: "MULW" + // arm64: "ORR R[0-9]+<<16" + // arm64: "RORW [$]17," + // arm64: "MOVD [$]10922," + // arm64: "CSET LS" + return i%6 == 0 +} + +func ndivis6_uint16(i uint16) bool { + // 386: "IMUL3L [$]-21845," + // 386: "ROLW [$]15," + // 386: "CMPW .*, [$]10922" + // 386: "SETHI" + // arm64: "MOVD [$]-21845," + // arm64: "MULW" + // arm64: "ORR R[0-9]+<<16" + // arm64: "RORW [$]17," + // arm64: "MOVD [$]10922," + // arm64: "CSET HI" + return i%6 != 0 +} + +func divis6_uint32(i uint32) bool { + // 386: "IMUL3L [$]-1431655765," + // 386: "ROLL [$]31," + // 386: "CMPL .*, [$]715827882" + // 386: "SETLS" + // arm64: "MOVD [$]-1431655765," + // arm64: "MULW" + // arm64: "RORW [$]1," + // arm64: "MOVD [$]715827882," + // arm64: "CSET LS" + return i%6 == 0 +} + +func ndivis6_uint32(i uint32) bool { + // 386: "IMUL3L [$]-1431655765," + // 386: "ROLL [$]31," + // 386: "CMPL .*, [$]715827882" + // 386: "SETHI" + // arm64: "MOVD [$]-1431655765," + // arm64: "MULW" + // arm64: "RORW [$]1," + // arm64: "MOVD [$]715827882," + // arm64: "CSET HI" + return i%6 != 0 +} + +func divis6_uint64(i uint64) bool { + // 386: "IMUL3L [$]-1431655766," + // 386: "IMUL3L [$]-1431655765," + // 386: "MULL" + // 386: "SHRL [$]1," + // 386: "SHLL [$]31," + // 386: "CMPL .*, [$]715827882" + // 386: "SETLS" + // arm64: "MOVD [$]-6148914691236517205," + // arm64: "MUL " + // arm64: "ROR [$]1," + // arm64: "MOVD [$]3074457345618258602," + // arm64: "CSET LS" + return i%6 == 0 +} + +func 
ndivis6_uint64(i uint64) bool { + // 386: "IMUL3L [$]-1431655766," + // 386: "IMUL3L [$]-1431655765," + // 386: "MULL" + // 386: "SHRL [$]1," + // 386: "SHLL [$]31," + // 386: "CMPL .*, [$]715827882" + // 386: "SETHI" + // arm64: "MOVD [$]-6148914691236517205," + // arm64: "MUL " + // arm64: "ROR [$]1," + // arm64: "MOVD [$]3074457345618258602," + // arm64: "CSET HI" + return i%6 != 0 +} + +func divis6_int8(i int8) bool { + // 386: "IMUL3L [$]-85," + // 386: "ADDL [$]42," + // 386: "ROLB [$]7," + // 386: "CMPB .*, [$]42" + // 386: "SETLS" + // arm64: "MOVD [$]-85," + // arm64: "MULW" + // arm64: "ADD [$]42," + // arm64: "UBFX [$]1, R[0-9]+, [$]7," + // arm64: "ORR R[0-9]+<<7" + // arm64: "CMPW [$]42," + // arm64: "CSET LS" + return i%6 == 0 +} + +func ndivis6_int8(i int8) bool { + // 386: "IMUL3L [$]-85," + // 386: "ADDL [$]42," + // 386: "ROLB [$]7," + // 386: "CMPB .*, [$]42" + // 386: "SETHI" + // arm64: "MOVD [$]-85," + // arm64: "MULW" + // arm64: "ADD [$]42," + // arm64: "UBFX [$]1, R[0-9]+, [$]7," + // arm64: "ORR R[0-9]+<<7" + // arm64: "CMPW [$]42," + // arm64: "CSET HI" + return i%6 != 0 +} + +func divis6_int16(i int16) bool { + // 386: "IMUL3L [$]-21845," + // 386: "ADDL [$]10922," + // 386: "ROLW [$]15," + // 386: "CMPW .*, [$]10922" + // 386: "SETLS" + // arm64: "MOVD [$]-21845," + // arm64: "MULW" + // arm64: "MOVD [$]10922," + // arm64: "ADD " + // arm64: "ORR R[0-9]+<<16" + // arm64: "RORW [$]17," + // arm64: "MOVD [$]10922," + // arm64: "CSET LS" + return i%6 == 0 +} + +func ndivis6_int16(i int16) bool { + // 386: "IMUL3L [$]-21845," + // 386: "ADDL [$]10922," + // 386: "ROLW [$]15," + // 386: "CMPW .*, [$]10922" + // 386: "SETHI" + // arm64: "MOVD [$]-21845," + // arm64: "MULW" + // arm64: "MOVD [$]10922," + // arm64: "ADD " + // arm64: "ORR R[0-9]+<<16" + // arm64: "RORW [$]17," + // arm64: "MOVD [$]10922," + // arm64: "CSET HI" + return i%6 != 0 +} + +func divis6_int32(i int32) bool { + // 386: "IMUL3L [$]-1431655765," + // 386: "ADDL [$]715827882," + // 386: "ROLL [$]31," + // 386: "CMPL .*, [$]715827882" + // 386: "SETLS" + // arm64: "MOVD [$]-1431655765," + // arm64: "MULW" + // arm64: "MOVD [$]715827882," + // arm64: "ADD " + // arm64: "RORW [$]1," + // arm64: "CSET LS" + return i%6 == 0 +} + +func ndivis6_int32(i int32) bool { + // 386: "IMUL3L [$]-1431655765," + // 386: "ADDL [$]715827882," + // 386: "ROLL [$]31," + // 386: "CMPL .*, [$]715827882" + // 386: "SETHI" + // arm64: "MOVD [$]-1431655765," + // arm64: "MULW" + // arm64: "MOVD [$]715827882," + // arm64: "ADD " + // arm64: "RORW [$]1," + // arm64: "CSET HI" + return i%6 != 0 +} + +func divis6_int64(i int64) bool { + // 386: "IMUL3L [$]-1431655766," + // 386: "IMUL3L [$]-1431655765," + // 386: "ADCL [$]715827882," + // 386: "CMPL .*, [$]715827882" + // 386: "CMPL .*, [$]-1431655766" + // 386: "SETLS" + // arm64: "MOVD [$]-6148914691236517205," + // arm64: "MUL " + // arm64: "MOVD [$]3074457345618258602," + // arm64: "ADD " + // arm64: "ROR [$]1," + // arm64: "CSET LS" + return i%6 == 0 +} + +func ndivis6_int64(i int64) bool { + // 386: "IMUL3L [$]-1431655766," + // 386: "IMUL3L [$]-1431655765," + // 386: "ADCL [$]715827882," + // 386: "CMPL .*, [$]715827882" + // 386: "CMPL .*, [$]-1431655766" + // 386: "SETHI" + // arm64: "MOVD [$]-6148914691236517205," + // arm64: "MUL " + // arm64: "MOVD [$]3074457345618258602," + // arm64: "ADD " + // arm64: "ROR [$]1," + // arm64: "CSET HI" + return i%6 != 0 +} + +func div_divis6_uint8(i uint8) (uint8, bool) { + // 386: "IMUL3L [$]342," + // 386: "SHRL [$]11," + // 
386: "SETEQ" + // 386: -"RO[RL]" + // arm64: "MOVD [$]342," + // arm64: "MULW" + // arm64: "UBFX [$]11, R[0-9]+, [$]21," + // arm64: "CSET EQ" + // arm64: -"RO[RL]" + return i / 6, i%6 == 0 +} + +func div_ndivis6_uint8(i uint8) (uint8, bool) { + // 386: "IMUL3L [$]342," + // 386: "SHRL [$]11," + // 386: "SETNE" + // 386: -"RO[RL]" + // arm64: "MOVD [$]342," + // arm64: "MULW" + // arm64: "UBFX [$]11, R[0-9]+, [$]21," + // arm64: "CSET NE" + // arm64: -"RO[RL]" + return i / 6, i%6 != 0 +} + +func div_divis6_uint16(i uint16) (uint16, bool) { + // 386: "IMUL3L [$]43691," + // 386: "SHRL [$]18," + // 386: "SHLL [$]1," + // 386: "SETEQ" + // 386: -"RO[RL]" + // arm64: "MOVD [$]87382," + // arm64: "MUL " + // arm64: "LSR [$]19," + // arm64: "CSET EQ" + // arm64: -"RO[RL]" + return i / 6, i%6 == 0 +} + +func div_ndivis6_uint16(i uint16) (uint16, bool) { + // 386: "IMUL3L [$]43691," + // 386: "SHRL [$]18," + // 386: "SHLL [$]1," + // 386: "SETNE" + // 386: -"RO[RL]" + // arm64: "MOVD [$]87382," + // arm64: "MUL " + // arm64: "LSR [$]19," + // arm64: "CSET NE" + // arm64: -"RO[RL]" + return i / 6, i%6 != 0 +} + +func div_divis6_uint32(i uint32) (uint32, bool) { + // 386: "MOVL [$]-1431655765," + // 386: "SHRL [$]2," + // 386: "SHLL [$]1," + // 386: "SETEQ" + // 386: -"RO[RL]" + // arm64: "MOVD [$]2863311531," + // arm64: "MUL " + // arm64: "LSR [$]34," + // arm64: "CSET EQ" + // arm64: -"RO[RL]" + return i / 6, i%6 == 0 +} + +func div_ndivis6_uint32(i uint32) (uint32, bool) { + // 386: "MOVL [$]-1431655765," + // 386: "SHRL [$]2," + // 386: "SHLL [$]1," + // 386: "SETNE" + // 386: -"RO[RL]" + // arm64: "MOVD [$]2863311531," + // arm64: "MUL " + // arm64: "LSR [$]34," + // arm64: "CSET NE" + // arm64: -"RO[RL]" + return i / 6, i%6 != 0 +} + +func div_divis6_uint64(i uint64) (uint64, bool) { + // 386: "MOVL [$]-1431655766," + // 386: "MOVL [$]-1431655765," + // 386: "MULL" + // 386: "SHRL [$]2," + // 386: "SHLL [$]30," + // 386: "SETEQ" + // 386: -".*CALL" + // 386: -"RO[RL]" + // arm64: "MOVD [$]-6148914691236517205," + // arm64: "UMULH" + // arm64: "LSR [$]2," + // arm64: "CSET EQ" + // arm64: -"RO[RL]" + return i / 6, i%6 == 0 +} + +func div_ndivis6_uint64(i uint64) (uint64, bool) { + // 386: "MOVL [$]-1431655766," + // 386: "MOVL [$]-1431655765," + // 386: "MULL" + // 386: "SHRL [$]2," + // 386: "SHLL [$]30," + // 386: "SETNE" + // 386: -".*CALL" + // 386: -"RO[RL]" + // arm64: "MOVD [$]-6148914691236517205," + // arm64: "UMULH" + // arm64: "LSR [$]2," + // arm64: "CSET NE" + // arm64: -"RO[RL]" + return i / 6, i%6 != 0 +} + +func div_divis6_int8(i int8) (int8, bool) { + // 386: "SARL [$]31," + // 386: "IMUL3L [$]171," + // 386: "SARL [$]10," + // 386: "SHLL [$]1," + // 386: "SETEQ" + // 386: -"RO[RL]" + // arm64: "MOVD [$]171," + // arm64: "MULW" + // arm64: "SBFX [$]10, R[0-9]+, [$]22," + // arm64: "SUB R[0-9]+->31," + // arm64: "CSET EQ" + // arm64: -"RO[RL]" + return i / 6, i%6 == 0 +} + +func div_ndivis6_int8(i int8) (int8, bool) { + // 386: "SARL [$]31," + // 386: "IMUL3L [$]171," + // 386: "SARL [$]10," + // 386: "SHLL [$]1," + // 386: "SETNE" + // 386: -"RO[RL]" + // arm64: "MOVD [$]171," + // arm64: "MULW" + // arm64: "SBFX [$]10, R[0-9]+, [$]22," + // arm64: "SUB R[0-9]+->31," + // arm64: "CSET NE" + // arm64: -"RO[RL]" + return i / 6, i%6 != 0 +} + +func div_divis6_int16(i int16) (int16, bool) { + // 386: "SARL [$]31," + // 386: "IMUL3L [$]43691," + // 386: "SARL [$]18," + // 386: "SHLL [$]1," + // 386: "SETEQ" + // 386: -"RO[RL]" + // arm64: "MOVD [$]43691," + // arm64: "MULW" + // 
arm64: "SBFX [$]18, R[0-9]+, [$]14," + // arm64: "SUB R[0-9]+->31," + // arm64: "CSET EQ" + // arm64: -"RO[RL]" + return i / 6, i%6 == 0 +} + +func div_ndivis6_int16(i int16) (int16, bool) { + // 386: "SARL [$]31," + // 386: "IMUL3L [$]43691," + // 386: "SARL [$]18," + // 386: "SHLL [$]1," + // 386: "SETNE" + // 386: -"RO[RL]" + // arm64: "MOVD [$]43691," + // arm64: "MULW" + // arm64: "SBFX [$]18, R[0-9]+, [$]14," + // arm64: "SUB R[0-9]+->31," + // arm64: "CSET NE" + // arm64: -"RO[RL]" + return i / 6, i%6 != 0 +} + +func div_divis6_int32(i int32) (int32, bool) { + // 386: "SARL [$]31," + // 386: "MOVL [$]-1431655765," + // 386: "IMULL" + // 386: "SARL [$]2," + // 386: "SHLL [$]1," + // 386: "SETEQ" + // 386: -"RO[RL]" + // arm64: "MOVD [$]2863311531," + // arm64: "MUL " + // arm64: "ASR [$]34," + // arm64: "SUB R[0-9]+->63," + // arm64: "CSET EQ" + // arm64: -"RO[RL]" + return i / 6, i%6 == 0 +} + +func div_ndivis6_int32(i int32) (int32, bool) { + // 386: "SARL [$]31," + // 386: "MOVL [$]-1431655765," + // 386: "IMULL" + // 386: "SARL [$]2," + // 386: "SHLL [$]1," + // 386: "SETNE" + // 386: -"RO[RL]" + // arm64: "MOVD [$]2863311531," + // arm64: "MUL " + // arm64: "ASR [$]34," + // arm64: "SUB R[0-9]+->63," + // arm64: "CSET NE" + // arm64: -"RO[RL]" + return i / 6, i%6 != 0 +} + +func div_divis6_int64(i int64) (int64, bool) { + // 386: "ANDL [$]-1431655766," + // 386: "ANDL [$]-1431655765," + // 386: "MOVL [$]-1431655766," + // 386: "MOVL [$]-1431655765," + // 386: "SUBL" "SBBL" + // 386: "MULL" + // 386: "SETEQ" + // 386: -"SET(LS|HI)" + // 386: -".*CALL" + // 386: -"RO[RL]" + // arm64: "MOVD [$]-6148914691236517205," + // arm64: "SMULH" + // arm64: "ADD" + // arm64: "ASR [$]2," + // arm64: "SUB R[0-9]+->63," + // arm64: "CSET EQ" + // arm64: -"RO[RL]" + return i / 6, i%6 == 0 +} + +func div_ndivis6_int64(i int64) (int64, bool) { + // 386: "ANDL [$]-1431655766," + // 386: "ANDL [$]-1431655765," + // 386: "MOVL [$]-1431655766," + // 386: "MOVL [$]-1431655765," + // 386: "SUBL" "SBBL" + // 386: "MULL" + // 386: "SETNE" + // 386: -"SET(LS|HI)" + // 386: -".*CALL" + // 386: -"RO[RL]" + // arm64: "MOVD [$]-6148914691236517205," + // arm64: "SMULH" + // arm64: "ADD" + // arm64: "ASR [$]2," + // arm64: "SUB R[0-9]+->63," + // arm64: "CSET NE" + // arm64: -"RO[RL]" + return i / 6, i%6 != 0 +} diff --git a/test/codegen/floats.go b/test/codegen/floats.go index 666c983b56a..343f8fab39b 100644 --- a/test/codegen/floats.go +++ b/test/codegen/floats.go @@ -6,6 +6,8 @@ package codegen +import "math" + // This file contains codegen tests related to arithmetic // simplifications and optimizations on float types. // For codegen tests on integer types, see arithmetic.go. 
@@ -15,42 +17,42 @@ package codegen // --------------------- // func Mul2(f float64) float64 { - // 386/sse2:"ADDSD",-"MULSD" - // amd64:"ADDSD",-"MULSD" - // arm/7:"ADDD",-"MULD" - // arm64:"FADDD",-"FMULD" - // loong64:"ADDD",-"MULD" - // ppc64x:"FADD",-"FMUL" - // riscv64:"FADDD",-"FMULD" + // 386/sse2:"ADDSD" -"MULSD" + // amd64:"ADDSD" -"MULSD" + // arm/7:"ADDD" -"MULD" + // arm64:"FADDD" -"FMULD" + // loong64:"ADDD" -"MULD" + // ppc64x:"FADD" -"FMUL" + // riscv64:"FADDD" -"FMULD" return f * 2.0 } func DivPow2(f1, f2, f3 float64) (float64, float64, float64) { - // 386/sse2:"MULSD",-"DIVSD" - // amd64:"MULSD",-"DIVSD" - // arm/7:"MULD",-"DIVD" - // arm64:"FMULD",-"FDIVD" - // loong64:"MULD",-"DIVD" - // ppc64x:"FMUL",-"FDIV" - // riscv64:"FMULD",-"FDIVD" + // 386/sse2:"MULSD" -"DIVSD" + // amd64:"MULSD" -"DIVSD" + // arm/7:"MULD" -"DIVD" + // arm64:"FMULD" -"FDIVD" + // loong64:"MULD" -"DIVD" + // ppc64x:"FMUL" -"FDIV" + // riscv64:"FMULD" -"FDIVD" x := f1 / 16.0 - // 386/sse2:"MULSD",-"DIVSD" - // amd64:"MULSD",-"DIVSD" - // arm/7:"MULD",-"DIVD" - // arm64:"FMULD",-"FDIVD" - // loong64:"MULD",-"DIVD" - // ppc64x:"FMUL",-"FDIVD" - // riscv64:"FMULD",-"FDIVD" + // 386/sse2:"MULSD" -"DIVSD" + // amd64:"MULSD" -"DIVSD" + // arm/7:"MULD" -"DIVD" + // arm64:"FMULD" -"FDIVD" + // loong64:"MULD" -"DIVD" + // ppc64x:"FMUL" -"FDIVD" + // riscv64:"FMULD" -"FDIVD" y := f2 / 0.125 - // 386/sse2:"ADDSD",-"DIVSD",-"MULSD" - // amd64:"ADDSD",-"DIVSD",-"MULSD" - // arm/7:"ADDD",-"MULD",-"DIVD" - // arm64:"FADDD",-"FMULD",-"FDIVD" - // loong64:"ADDD",-"MULD",-"DIVD" - // ppc64x:"FADD",-"FMUL",-"FDIV" - // riscv64:"FADDD",-"FMULD",-"FDIVD" + // 386/sse2:"ADDSD" -"DIVSD" -"MULSD" + // amd64:"ADDSD" -"DIVSD" -"MULSD" + // arm/7:"ADDD" -"MULD" -"DIVD" + // arm64:"FADDD" -"FMULD" -"FDIVD" + // loong64:"ADDD" -"MULD" -"DIVD" + // ppc64x:"FADD" -"FMUL" -"FDIV" + // riscv64:"FADDD" -"FMULD" -"FDIVD" z := f3 / 0.5 return x, y, z @@ -73,67 +75,67 @@ func indexStore(b0 []float64, b1 float64, idx int) { // ----------- // func FusedAdd32(x, y, z float32) float32 { - // s390x:"FMADDS\t" - // ppc64x:"FMADDS\t" + // s390x:"FMADDS " + // ppc64x:"FMADDS " // arm64:"FMADDS" - // loong64:"FMADDF\t" - // riscv64:"FMADDS\t" - // amd64/v3:"VFMADD231SS\t" + // loong64:"FMADDF " + // riscv64:"FMADDS " + // amd64/v3:"VFMADD231SS " return x*y + z } func FusedSub32_a(x, y, z float32) float32 { - // s390x:"FMSUBS\t" - // ppc64x:"FMSUBS\t" - // riscv64:"FMSUBS\t" - // loong64:"FMSUBF\t" + // s390x:"FMSUBS " + // ppc64x:"FMSUBS " + // riscv64:"FMSUBS " + // loong64:"FMSUBF " return x*y - z } func FusedSub32_b(x, y, z float32) float32 { // arm64:"FMSUBS" - // loong64:"FNMSUBF\t" - // riscv64:"FNMSUBS\t" + // loong64:"FNMSUBF " + // riscv64:"FNMSUBS " return z - x*y } func FusedAdd64(x, y, z float64) float64 { - // s390x:"FMADD\t" - // ppc64x:"FMADD\t" + // s390x:"FMADD " + // ppc64x:"FMADD " // arm64:"FMADDD" - // loong64:"FMADDD\t" - // riscv64:"FMADDD\t" - // amd64/v3:"VFMADD231SD\t" + // loong64:"FMADDD " + // riscv64:"FMADDD " + // amd64/v3:"VFMADD231SD " return x*y + z } func FusedSub64_a(x, y, z float64) float64 { - // s390x:"FMSUB\t" - // ppc64x:"FMSUB\t" - // riscv64:"FMSUBD\t" - // loong64:"FMSUBD\t" + // s390x:"FMSUB " + // ppc64x:"FMSUB " + // riscv64:"FMSUBD " + // loong64:"FMSUBD " return x*y - z } func FusedSub64_b(x, y, z float64) float64 { // arm64:"FMSUBD" - // loong64:"FNMSUBD\t" - // riscv64:"FNMSUBD\t" + // loong64:"FNMSUBD " + // riscv64:"FNMSUBD " return z - x*y } func Cmp(f float64) bool { - // 
arm64:"FCMPD","(BGT|BLE|BMI|BPL)",-"CSET\tGT",-"CBZ" + // arm64:"FCMPD" "(BGT|BLE|BMI|BPL)" -"CSET GT" -"CBZ" return f > 4 || f < -4 } func CmpZero64(f float64) bool { - // s390x:"LTDBR",-"FCMPU" + // s390x:"LTDBR" -"FCMPU" return f <= 0 } func CmpZero32(f float32) bool { - // s390x:"LTEBR",-"CEBR" + // s390x:"LTEBR" -"CEBR" return f <= 0 } @@ -218,43 +220,43 @@ func Float32Max(a, b float32) float32 { // ------------------------ // func Float32ConstantZero() float32 { - // arm64:"FMOVS\tZR," + // arm64:"FMOVS ZR," return 0.0 } func Float32ConstantChipFloat() float32 { - // arm64:"FMOVS\t[$]\\(2\\.25\\)," + // arm64:"FMOVS [$]\\(2\\.25\\)," return 2.25 } func Float32Constant() float32 { - // arm64:"FMOVS\t[$]f32\\.42440000\\(SB\\)" - // ppc64x/power8:"FMOVS\t[$]f32\\.42440000\\(SB\\)" - // ppc64x/power9:"FMOVS\t[$]f32\\.42440000\\(SB\\)" - // ppc64x/power10:"XXSPLTIDP\t[$]1111752704," + // arm64:"FMOVS [$]f32\\.42440000\\(SB\\)" + // ppc64x/power8:"FMOVS [$]f32\\.42440000\\(SB\\)" + // ppc64x/power9:"FMOVS [$]f32\\.42440000\\(SB\\)" + // ppc64x/power10:"XXSPLTIDP [$]1111752704," return 49.0 } func Float64ConstantZero() float64 { - // arm64:"FMOVD\tZR," + // arm64:"FMOVD ZR," return 0.0 } func Float64ConstantChipFloat() float64 { - // arm64:"FMOVD\t[$]\\(2\\.25\\)," + // arm64:"FMOVD [$]\\(2\\.25\\)," return 2.25 } func Float64Constant() float64 { - // arm64:"FMOVD\t[$]f64\\.4048800000000000\\(SB\\)" - // ppc64x/power8:"FMOVD\t[$]f64\\.4048800000000000\\(SB\\)" - // ppc64x/power9:"FMOVD\t[$]f64\\.4048800000000000\\(SB\\)" - // ppc64x/power10:"XXSPLTIDP\t[$]1111752704," + // arm64:"FMOVD [$]f64\\.4048800000000000\\(SB\\)" + // ppc64x/power8:"FMOVD [$]f64\\.4048800000000000\\(SB\\)" + // ppc64x/power9:"FMOVD [$]f64\\.4048800000000000\\(SB\\)" + // ppc64x/power10:"XXSPLTIDP [$]1111752704," return 49.0 } func Float32DenormalConstant() float32 { - // ppc64x:"FMOVS\t[$]f32\\.00400000\\(SB\\)" + // ppc64x:"FMOVS [$]f32\\.00400000\\(SB\\)" return 0x1p-127 } @@ -262,16 +264,52 @@ func Float32DenormalConstant() float32 { // denormal float32 value. On ppc64x, denormal values cannot // be used with XXSPLTIDP. 
func Float64DenormalFloat32Constant() float64 { - // ppc64x:"FMOVD\t[$]f64\\.3800000000000000\\(SB\\)" + // ppc64x:"FMOVD [$]f64\\.3800000000000000\\(SB\\)" return 0x1p-127 } func Float32ConstantStore(p *float32) { - // amd64:"MOVL\t[$]1085133554" + // amd64:"MOVL [$]1085133554" + // riscv64: "MOVF [$]f32.40add2f2" *p = 5.432 } func Float64ConstantStore(p *float64) { - // amd64:"MOVQ\t[$]4617801906721357038" + // amd64: "MOVQ [$]4617801906721357038" + // riscv64: "MOVD [$]f64.4015ba5e353f7cee" *p = 5.432 } + +// ------------------------ // +// Subnormal tests // +// ------------------------ // + +func isSubnormal(x float64) bool { + // riscv64:"FCLASSD" -"FABSD" + return math.Abs(x) < 2.2250738585072014e-308 +} + +func isNormal(x float64) bool { + // riscv64:"FCLASSD" -"FABSD" + return math.Abs(x) >= 0x1p-1022 +} + +func isPosSubnormal(x float64) bool { + // riscv64:"FCLASSD" + return x > 0 && x < 2.2250738585072014e-308 +} + +func isNegSubnormal(x float64) bool { + // riscv64:"FCLASSD" + return x < 0 && x > -0x1p-1022 +} + +func isPosNormal(x float64) bool { + // riscv64:"FCLASSD" + return x >= 2.2250738585072014e-308 +} + +func isNegNormal(x float64) bool { + // riscv64:"FCLASSD" + return x <= -2.2250738585072014e-308 +} diff --git a/test/codegen/fuse.go b/test/codegen/fuse.go index 8d6ea3c5c74..4fbb03bef80 100644 --- a/test/codegen/fuse.go +++ b/test/codegen/fuse.go @@ -6,6 +6,8 @@ package codegen +import "math" + // Notes: // - these examples use channels to provide a source of // unknown values that cannot be optimized away @@ -17,57 +19,57 @@ package codegen // ---------------------------------- // func si1c(c <-chan int64) { - // amd64:"CMPQ\t.+, [$]256" - // s390x:"CLGIJ\t[$]12, R[0-9]+, [$]255" + // amd64:"CMPQ .+, [$]256" + // s390x:"CLGIJ [$]12, R[0-9]+, [$]255" for x := <-c; x >= 0 && x < 256; x = <-c { } } func si2c(c <-chan int32) { - // amd64:"CMPL\t.+, [$]256" - // s390x:"CLIJ\t[$]12, R[0-9]+, [$]255" + // amd64:"CMPL .+, [$]256" + // s390x:"CLIJ [$]12, R[0-9]+, [$]255" for x := <-c; x >= 0 && x < 256; x = <-c { } } func si3c(c <-chan int16) { - // amd64:"CMPW\t.+, [$]256" - // s390x:"CLIJ\t[$]12, R[0-9]+, [$]255" + // amd64:"CMPW .+, [$]256" + // s390x:"CLIJ [$]12, R[0-9]+, [$]255" for x := <-c; x >= 0 && x < 256; x = <-c { } } func si4c(c <-chan int8) { - // amd64:"CMPB\t.+, [$]10" - // s390x:"CLIJ\t[$]4, R[0-9]+, [$]10" + // amd64:"CMPB .+, [$]10" + // s390x:"CLIJ [$]4, R[0-9]+, [$]10" for x := <-c; x >= 0 && x < 10; x = <-c { } } func si5c(c <-chan int64) { - // amd64:"CMPQ\t.+, [$]251","ADDQ\t[$]-5," - // s390x:"CLGIJ\t[$]4, R[0-9]+, [$]251","ADD\t[$]-5," + // amd64:"CMPQ .+, [$]251" "ADDQ [$]-5," + // s390x:"CLGIJ [$]4, R[0-9]+, [$]251" "ADD [$]-5," for x := <-c; x < 256 && x > 4; x = <-c { } } func si6c(c <-chan int32) { - // amd64:"CMPL\t.+, [$]255","DECL\t" - // s390x:"CLIJ\t[$]12, R[0-9]+, [$]255","ADDW\t[$]-1," + // amd64:"CMPL .+, [$]255" "DECL " + // s390x:"CLIJ [$]12, R[0-9]+, [$]255" "ADDW [$]-1," for x := <-c; x > 0 && x <= 256; x = <-c { } } func si7c(c <-chan int16) { - // amd64:"CMPW\t.+, [$]60","ADDL\t[$]10," - // s390x:"CLIJ\t[$]12, R[0-9]+, [$]60","ADDW\t[$]10," + // amd64:"CMPW .+, [$]60" "ADDL [$]10," + // s390x:"CLIJ [$]12, R[0-9]+, [$]60" "ADDW [$]10," for x := <-c; x >= -10 && x <= 50; x = <-c { } } func si8c(c <-chan int8) { - // amd64:"CMPB\t.+, [$]126","ADDL\t[$]126," - // s390x:"CLIJ\t[$]4, R[0-9]+, [$]126","ADDW\t[$]126," + // amd64:"CMPB .+, [$]126" "ADDL [$]126," + // s390x:"CLIJ [$]4, R[0-9]+, [$]126" "ADDW [$]126," for x := <-c; x >= -126 
&& x < 0; x = <-c { } } @@ -77,57 +79,57 @@ func si8c(c <-chan int8) { // ---------------------------------- // func si1d(c <-chan int64) { - // amd64:"CMPQ\t.+, [$]256" - // s390x:"CLGIJ\t[$]2, R[0-9]+, [$]255" + // amd64:"CMPQ .+, [$]256" + // s390x:"CLGIJ [$]2, R[0-9]+, [$]255" for x := <-c; x < 0 || x >= 256; x = <-c { } } func si2d(c <-chan int32) { - // amd64:"CMPL\t.+, [$]256" - // s390x:"CLIJ\t[$]2, R[0-9]+, [$]255" + // amd64:"CMPL .+, [$]256" + // s390x:"CLIJ [$]2, R[0-9]+, [$]255" for x := <-c; x < 0 || x >= 256; x = <-c { } } func si3d(c <-chan int16) { - // amd64:"CMPW\t.+, [$]256" - // s390x:"CLIJ\t[$]2, R[0-9]+, [$]255" + // amd64:"CMPW .+, [$]256" + // s390x:"CLIJ [$]2, R[0-9]+, [$]255" for x := <-c; x < 0 || x >= 256; x = <-c { } } func si4d(c <-chan int8) { - // amd64:"CMPB\t.+, [$]10" - // s390x:"CLIJ\t[$]10, R[0-9]+, [$]10" + // amd64:"CMPB .+, [$]10" + // s390x:"CLIJ [$]10, R[0-9]+, [$]10" for x := <-c; x < 0 || x >= 10; x = <-c { } } func si5d(c <-chan int64) { - // amd64:"CMPQ\t.+, [$]251","ADDQ\t[$]-5," - // s390x:"CLGIJ\t[$]10, R[0-9]+, [$]251","ADD\t[$]-5," + // amd64:"CMPQ .+, [$]251" "ADDQ [$]-5," + // s390x:"CLGIJ [$]10, R[0-9]+, [$]251" "ADD [$]-5," for x := <-c; x >= 256 || x <= 4; x = <-c { } } func si6d(c <-chan int32) { - // amd64:"CMPL\t.+, [$]255","DECL\t" - // s390x:"CLIJ\t[$]2, R[0-9]+, [$]255","ADDW\t[$]-1," + // amd64:"CMPL .+, [$]255" "DECL " + // s390x:"CLIJ [$]2, R[0-9]+, [$]255" "ADDW [$]-1," for x := <-c; x <= 0 || x > 256; x = <-c { } } func si7d(c <-chan int16) { - // amd64:"CMPW\t.+, [$]60","ADDL\t[$]10," - // s390x:"CLIJ\t[$]2, R[0-9]+, [$]60","ADDW\t[$]10," + // amd64:"CMPW .+, [$]60" "ADDL [$]10," + // s390x:"CLIJ [$]2, R[0-9]+, [$]60" "ADDW [$]10," for x := <-c; x < -10 || x > 50; x = <-c { } } func si8d(c <-chan int8) { - // amd64:"CMPB\t.+, [$]126","ADDL\t[$]126," - // s390x:"CLIJ\t[$]10, R[0-9]+, [$]126","ADDW\t[$]126," + // amd64:"CMPB .+, [$]126" "ADDL [$]126," + // s390x:"CLIJ [$]10, R[0-9]+, [$]126" "ADDW [$]126," for x := <-c; x < -126 || x >= 0; x = <-c { } } @@ -137,29 +139,29 @@ func si8d(c <-chan int8) { // ------------------------------------ // func ui1c(c <-chan uint64) { - // amd64:"CMPQ\t.+, [$]251","ADDQ\t[$]-5," - // s390x:"CLGIJ\t[$]4, R[0-9]+, [$]251","ADD\t[$]-5," + // amd64:"CMPQ .+, [$]251" "ADDQ [$]-5," + // s390x:"CLGIJ [$]4, R[0-9]+, [$]251" "ADD [$]-5," for x := <-c; x < 256 && x > 4; x = <-c { } } func ui2c(c <-chan uint32) { - // amd64:"CMPL\t.+, [$]255","DECL\t" - // s390x:"CLIJ\t[$]12, R[0-9]+, [$]255","ADDW\t[$]-1," + // amd64:"CMPL .+, [$]255" "DECL " + // s390x:"CLIJ [$]12, R[0-9]+, [$]255" "ADDW [$]-1," for x := <-c; x > 0 && x <= 256; x = <-c { } } func ui3c(c <-chan uint16) { - // amd64:"CMPW\t.+, [$]40","ADDL\t[$]-10," - // s390x:"CLIJ\t[$]12, R[0-9]+, [$]40","ADDW\t[$]-10," + // amd64:"CMPW .+, [$]40" "ADDL [$]-10," + // s390x:"CLIJ [$]12, R[0-9]+, [$]40" "ADDW [$]-10," for x := <-c; x >= 10 && x <= 50; x = <-c { } } func ui4c(c <-chan uint8) { - // amd64:"CMPB\t.+, [$]2","ADDL\t[$]-126," - // s390x:"CLIJ\t[$]4, R[0-9]+, [$]2","ADDW\t[$]-126," + // amd64:"CMPB .+, [$]2" "ADDL [$]-126," + // s390x:"CLIJ [$]4, R[0-9]+, [$]2" "ADDW [$]-126," for x := <-c; x >= 126 && x < 128; x = <-c { } } @@ -169,33 +171,111 @@ func ui4c(c <-chan uint8) { // ------------------------------------ // func ui1d(c <-chan uint64) { - // amd64:"CMPQ\t.+, [$]251","ADDQ\t[$]-5," - // s390x:"CLGIJ\t[$]10, R[0-9]+, [$]251","ADD\t[$]-5," + // amd64:"CMPQ .+, [$]251" "ADDQ [$]-5," + // s390x:"CLGIJ [$]10, R[0-9]+, [$]251" "ADD 
[$]-5," for x := <-c; x >= 256 || x <= 4; x = <-c { } } func ui2d(c <-chan uint32) { - // amd64:"CMPL\t.+, [$]254","ADDL\t[$]-2," - // s390x:"CLIJ\t[$]2, R[0-9]+, [$]254","ADDW\t[$]-2," + // amd64:"CMPL .+, [$]254" "ADDL [$]-2," + // s390x:"CLIJ [$]2, R[0-9]+, [$]254" "ADDW [$]-2," for x := <-c; x <= 1 || x > 256; x = <-c { } } func ui3d(c <-chan uint16) { - // amd64:"CMPW\t.+, [$]40","ADDL\t[$]-10," - // s390x:"CLIJ\t[$]2, R[0-9]+, [$]40","ADDW\t[$]-10," + // amd64:"CMPW .+, [$]40" "ADDL [$]-10," + // s390x:"CLIJ [$]2, R[0-9]+, [$]40" "ADDW [$]-10," for x := <-c; x < 10 || x > 50; x = <-c { } } func ui4d(c <-chan uint8) { - // amd64:"CMPB\t.+, [$]2","ADDL\t[$]-126," - // s390x:"CLIJ\t[$]10, R[0-9]+, [$]2","ADDW\t[$]-126," + // amd64:"CMPB .+, [$]2" "ADDL [$]-126," + // s390x:"CLIJ [$]10, R[0-9]+, [$]2" "ADDW [$]-126," for x := <-c; x < 126 || x >= 128; x = <-c { } } +// -------------------------------------// +// merge NaN checks // +// ------------------------------------ // + +func f64NaNOrPosInf(c <-chan float64) { + // This test assumes IsInf(x, 1) is implemented as x > MaxFloat rather than x == Inf(1). + + // amd64:"JCS" -"JNE" -"JPS" -"JPC" + // riscv64:"FCLASSD" -"FLED" -"FLTD" -"FNED" -"FEQD" + for x := <-c; math.IsNaN(x) || math.IsInf(x, 1); x = <-c { + } +} + +func f64NaNOrNegInf(c <-chan float64) { + // This test assumes IsInf(x, -1) is implemented as x < -MaxFloat rather than x == Inf(-1). + + // amd64:"JCS" -"JNE" -"JPS" -"JPC" + // riscv64:"FCLASSD" -"FLED" -"FLTD" -"FNED" -"FEQD" + for x := <-c; math.IsNaN(x) || math.IsInf(x, -1); x = <-c { + } +} + +func f64NaNOrLtOne(c <-chan float64) { + // amd64:"JCS" -"JNE" -"JPS" -"JPC" + // riscv64:"FLED" -"FLTD" -"FNED" -"FEQD" + for x := <-c; math.IsNaN(x) || x < 1; x = <-c { + } +} + +func f64NaNOrLteOne(c <-chan float64) { + // amd64:"JLS" -"JNE" -"JPS" -"JPC" + // riscv64:"FLTD" -"FLED" -"FNED" -"FEQD" + for x := <-c; x <= 1 || math.IsNaN(x); x = <-c { + } +} + +func f64NaNOrGtOne(c <-chan float64) { + // amd64:"JCS" -"JNE" -"JPS" -"JPC" + // riscv64:"FLED" -"FLTD" -"FNED" -"FEQD" + for x := <-c; math.IsNaN(x) || x > 1; x = <-c { + } +} + +func f64NaNOrGteOne(c <-chan float64) { + // amd64:"JLS" -"JNE" -"JPS" -"JPC" + // riscv64:"FLTD" -"FLED" -"FNED" -"FEQD" + for x := <-c; x >= 1 || math.IsNaN(x); x = <-c { + } +} + +func f32NaNOrLtOne(c <-chan float32) { + // amd64:"JCS" -"JNE" -"JPS" -"JPC" + // riscv64:"FLES" -"FLTS" -"FNES" -"FEQS" + for x := <-c; x < 1 || x != x; x = <-c { + } +} + +func f32NaNOrLteOne(c <-chan float32) { + // amd64:"JLS" -"JNE" -"JPS" -"JPC" + // riscv64:"FLTS" -"FLES" -"FNES" -"FEQS" + for x := <-c; x != x || x <= 1; x = <-c { + } +} + +func f32NaNOrGtOne(c <-chan float32) { + // amd64:"JCS" -"JNE" -"JPS" -"JPC" + // riscv64:"FLES" -"FLTS" -"FNES" -"FEQS" + for x := <-c; x > 1 || x != x; x = <-c { + } +} + +func f32NaNOrGteOne(c <-chan float32) { + // amd64:"JLS" -"JNE" -"JPS" -"JPC" + // riscv64:"FLTS" -"FLES" -"FNES" -"FEQS" + for x := <-c; x != x || x >= 1; x = <-c { + } +} + // ------------------------------------ // // regressions // // ------------------------------------ // @@ -211,8 +291,8 @@ func lt20(x uint64) bool { func issue74915(c <-chan uint64) { // Check that the optimization is not blocked by function inlining. 
- // amd64:"CMPQ\t.+, [$]16","ADDQ\t[$]-4," - // s390x:"CLGIJ\t[$]4, R[0-9]+, [$]16","ADD\t[$]-4," + // amd64:"CMPQ .+, [$]16" "ADDQ [$]-4," + // s390x:"CLGIJ [$]4, R[0-9]+, [$]16" "ADD [$]-4," for x := <-c; gte4(x) && lt20(x); x = <-c { } } diff --git a/test/codegen/ifaces.go b/test/codegen/ifaces.go index cc67a047405..59139ceab34 100644 --- a/test/codegen/ifaces.go +++ b/test/codegen/ifaces.go @@ -21,20 +21,20 @@ func NopConvertGeneric[T any](x T) T { var NopConvertGenericIface = NopConvertGeneric[I] func ConvToM(x any) I { - // amd64:`CALL\truntime.typeAssert`,`MOVL\t16\(.*\)`,`MOVQ\t8\(.*\)(.*\*1)` - // arm64:`CALL\truntime.typeAssert`,`LDAR`,`MOVWU`,`MOVD\t\(R.*\)\(R.*\)` + // amd64:`CALL runtime.typeAssert`,`MOVL 16\(.*\)`,`MOVQ 8\(.*\)(.*\*1)` + // arm64:`CALL runtime.typeAssert`,`LDAR`,`MOVWU`,`MOVD \(R.*\)\(R.*\)` return x.(I) } func e1(x any, y *int) bool { // amd64:-`.*faceeq`,`SETEQ` - // arm64:-`.*faceeq`,`CSET\tEQ` + // arm64:-`.*faceeq`,`CSET EQ` return x == y } func e2(x any, y *int) bool { // amd64:-`.*faceeq`,`SETEQ` - // arm64:-`.*faceeq`,`CSET\tEQ` + // arm64:-`.*faceeq`,`CSET EQ` return y == x } @@ -42,7 +42,7 @@ type E *int func e3(x any, y E) bool { // amd64:-`.*faceeq`,`SETEQ` - // arm64:-`.*faceeq`,`CSET\tEQ` + // arm64:-`.*faceeq`,`CSET EQ` return x == y } @@ -52,12 +52,12 @@ func (t *T) M() {} func i1(x I, y *T) bool { // amd64:-`.*faceeq`,`SETEQ` - // arm64:-`.*faceeq`,`CSET\tEQ` + // arm64:-`.*faceeq`,`CSET EQ` return x == y } func i2(x I, y *T) bool { // amd64:-`.*faceeq`,`SETEQ` - // arm64:-`.*faceeq`,`CSET\tEQ` + // arm64:-`.*faceeq`,`CSET EQ` return y == x } diff --git a/test/codegen/issue25378.go b/test/codegen/issue25378.go index 810a0227222..ddb5cecc401 100644 --- a/test/codegen/issue25378.go +++ b/test/codegen/issue25378.go @@ -14,9 +14,9 @@ var wsp = [256]bool{ } func zeroExtArgByte(ch [2]byte) bool { - return wsp[ch[0]] // amd64:-"MOVBLZX\t..,.." + return wsp[ch[0]] // amd64:-"MOVBLZX ..,.." } func zeroExtArgUint16(ch [2]uint16) bool { - return wsp[ch[0]] // amd64:-"MOVWLZX\t..,.." + return wsp[ch[0]] // amd64:-"MOVWLZX ..,.." 
} diff --git a/test/codegen/issue52635.go b/test/codegen/issue52635.go index 65f2a021d63..965b0732105 100644 --- a/test/codegen/issue52635.go +++ b/test/codegen/issue52635.go @@ -17,31 +17,31 @@ type T struct { func (t *T) f() { // amd64:-".*runtime.memclrNoHeapPointers" - // amd64:`MOVUPS\tX15,` + // amd64:`MOVUPS X15,` for i := range t.a { t.a[i] = 0 } // amd64:-".*runtime.memclrNoHeapPointers" - // amd64:`MOVUPS\tX15,` + // amd64:`MOVUPS X15,` for i := range *t.a { t.a[i] = 0 } // amd64:-".*runtime.memclrNoHeapPointers" - // amd64:`MOVUPS\tX15,` + // amd64:`MOVUPS X15,` for i := range t.a { (*t.a)[i] = 0 } // amd64:-".*runtime.memclrNoHeapPointers" - // amd64:`MOVUPS\tX15,` + // amd64:`MOVUPS X15,` for i := range *t.a { (*t.a)[i] = 0 } // amd64:-".*runtime.memclrNoHeapPointers" - // amd64:`MOVUPS\tX15,` + // amd64:`MOVUPS X15,` for i := range t.b { t.b[i] = 0 } diff --git a/test/codegen/issue56440.go b/test/codegen/issue56440.go index 826dea85860..3d09688811f 100644 --- a/test/codegen/issue56440.go +++ b/test/codegen/issue56440.go @@ -13,7 +13,7 @@ package codegen func f(x []int) int { s := make([]int, 3) s = append(s, 4, 5) - // amd64:`MOVQ\t40\(.*\),` + // amd64:`MOVQ 40\(.*\),` return x[len(s)] } @@ -26,7 +26,7 @@ func g(x []int, p *bool) int { } s = append(s, 4, 5) if *p { - // amd64:`MOVQ\t40\(.*\),` + // amd64:`MOVQ 40\(.*\),` return x[len(s)] } } diff --git a/test/codegen/issue60324.go b/test/codegen/issue60324.go index d106e7ecf80..ca622910159 100644 --- a/test/codegen/issue60324.go +++ b/test/codegen/issue60324.go @@ -7,30 +7,30 @@ package codegen func main() { - // amd64:"LEAQ\tcommand-line-arguments\\.main\\.f\\.g\\.h\\.func3" + // amd64:"LEAQ command-line-arguments\\.main\\.f\\.g\\.h\\.func3" f(1)() - // amd64:"LEAQ\tcommand-line-arguments\\.main\\.g\\.h\\.func2" + // amd64:"LEAQ command-line-arguments\\.main\\.g\\.h\\.func2" g(2)() - // amd64:"LEAQ\tcommand-line-arguments\\.main\\.h\\.func1" + // amd64:"LEAQ command-line-arguments\\.main\\.h\\.func1" h(3)() - // amd64:"LEAQ\tcommand-line-arguments\\.main\\.f\\.g\\.h\\.func4" + // amd64:"LEAQ command-line-arguments\\.main\\.f\\.g\\.h\\.func4" f(4)() } func f(x int) func() { - // amd64:"LEAQ\tcommand-line-arguments\\.f\\.g\\.h\\.func1" + // amd64:"LEAQ command-line-arguments\\.f\\.g\\.h\\.func1" return g(x) } func g(x int) func() { - // amd64:"LEAQ\tcommand-line-arguments\\.g\\.h\\.func1" + // amd64:"LEAQ command-line-arguments\\.g\\.h\\.func1" return h(x) } func h(x int) func() { - // amd64:"LEAQ\tcommand-line-arguments\\.h\\.func1" + // amd64:"LEAQ command-line-arguments\\.h\\.func1" return func() { recover() } } diff --git a/test/codegen/issue74485.go b/test/codegen/issue74485.go index b5aba9568ca..b2877c822be 100644 --- a/test/codegen/issue74485.go +++ b/test/codegen/issue74485.go @@ -7,41 +7,41 @@ package codegen func divUint64(b uint64) uint64 { - // amd64:"SHRQ [$]63, AX" + // amd64:"SHRQ [$]63, AX" return b / 9223372036854775808 } func divUint32(b uint32) uint32 { - // amd64:"SHRL [$]31, AX" + // amd64:"SHRL [$]31, AX" return b / 2147483648 } func divUint16(b uint16) uint16 { - // amd64:"SHRW [$]15, AX" + // amd64:"SHRW [$]15, AX" return b / 32768 } func divUint8(b uint8) uint8 { - // amd64:"SHRB [$]7, AL" + // amd64:"SHRB [$]7, AL" return b / 128 } func modUint64(b uint64) uint64 { - // amd64:"BTRQ [$]63, AX" + // amd64:"BTRQ [$]63, AX" return b % 9223372036854775808 } func modUint32(b uint32) uint32 { - // amd64:"ANDL [$]2147483647, AX" + // amd64:"ANDL [$]2147483647, AX" return b % 2147483648 } func modUint16(b 
uint16) uint16 { - // amd64:"ANDL [$]32767, AX" + // amd64:"ANDL [$]32767, AX" return b % 32768 } func modUint8(b uint8) uint8 { - // amd64:"ANDL [$]127, AX" + // amd64:"ANDL [$]127, AX" return b % 128 } diff --git a/test/codegen/load_type_from_itab.go b/test/codegen/load_type_from_itab.go index b47044fcbd7..4d56122c05c 100644 --- a/test/codegen/load_type_from_itab.go +++ b/test/codegen/load_type_from_itab.go @@ -19,6 +19,6 @@ func (*Impl) A() {} func main() { var a M = &Impl{} - // amd64:`LEAQ\ttype:.*Impl` + // amd64:`LEAQ type:.*Impl` a.(A).A() } diff --git a/test/codegen/logic.go b/test/codegen/logic.go index ac33f91dadf..1cd389bd2db 100644 --- a/test/codegen/logic.go +++ b/test/codegen/logic.go @@ -28,14 +28,14 @@ func ornot(x, y int) int { // Verify that (OR (NOT x) (NOT y)) rewrites to (NOT (AND x y)) func orDemorgans(x, y int) int { - // amd64:"AND",-"OR" + // amd64:"AND" -"OR" z := ^x | ^y return z } // Verify that (AND (NOT x) (NOT y)) rewrites to (NOT (OR x y)) func andDemorgans(x, y int) int { - // amd64:"OR",-"AND" + // amd64:"OR" -"AND" z := ^x & ^y return z } diff --git a/test/codegen/maps.go b/test/codegen/maps.go index 860b2c2cbd2..48438eb90c6 100644 --- a/test/codegen/maps.go +++ b/test/codegen/maps.go @@ -16,12 +16,12 @@ package codegen // Direct use of constants in fast map access calls (Issue #19015). func AccessInt1(m map[int]int) int { - // amd64:"MOV[LQ]\t[$]5" + // amd64:"MOV[LQ] [$]5" return m[5] } func AccessInt2(m map[int]int) bool { - // amd64:"MOV[LQ]\t[$]5" + // amd64:"MOV[LQ] [$]5" _, ok := m[5] return ok } @@ -37,6 +37,28 @@ func AccessString2(m map[string]int) bool { return ok } +func AccessStringIntArray2(m map[string][16]int, k string) bool { + // amd64:-"MOVUPS" + _, ok := m[k] + return ok +} + +type Struct struct { + A, B, C, D, E, F, G, H, I, J int +} + +func AccessStringStruct2(m map[string]Struct, k string) bool { + // amd64:-"MOVUPS" + _, ok := m[k] + return ok +} + +func AccessIntArrayLarge2(m map[int][512]int, k int) bool { + // amd64:-"REP",-"MOVSQ" + _, ok := m[k] + return ok +} + // ------------------- // // String Conversion // // ------------------- // @@ -147,7 +169,7 @@ func MapClearSideEffect(m map[int]int) int { func MapLiteralSizing(x int) (map[int]int, map[int]int) { // This is tested for internal/abi/maps.go:MapBucketCountBits={3,4,5} - // amd64:"MOVL\t[$]33," + // amd64:"MOVL [$]33," m := map[int]int{ 0: 0, 1: 1, @@ -183,7 +205,7 @@ func MapLiteralSizing(x int) (map[int]int, map[int]int) { 31: 32, 32: 32, } - // amd64:"MOVL\t[$]33," + // amd64:"MOVL [$]33," n := map[int]int{ 0: 0, 1: 1, diff --git a/test/codegen/math.go b/test/codegen/math.go index eadf9d7d055..ef8c51e6589 100644 --- a/test/codegen/math.go +++ b/test/codegen/math.go @@ -1,4 +1,4 @@ -// asmcheck +// asmcheck -gcflags=-d=converthash=qy // Copyright 2018 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style @@ -12,37 +12,37 @@ var sink64 [8]float64 func approx(x float64) { // amd64/v2:-".*x86HasSSE41" amd64/v3:-".*x86HasSSE41" - // amd64:"ROUNDSD\t[$]2" - // s390x:"FIDBR\t[$]6" + // amd64:"ROUNDSD [$]2" + // s390x:"FIDBR [$]6" // arm64:"FRINTPD" // ppc64x:"FRIP" // wasm:"F64Ceil" sink64[0] = math.Ceil(x) // amd64/v2:-".*x86HasSSE41" amd64/v3:-".*x86HasSSE41" - // amd64:"ROUNDSD\t[$]1" - // s390x:"FIDBR\t[$]7" + // amd64:"ROUNDSD [$]1" + // s390x:"FIDBR [$]7" // arm64:"FRINTMD" // ppc64x:"FRIM" // wasm:"F64Floor" sink64[1] = math.Floor(x) - // s390x:"FIDBR\t[$]1" + // s390x:"FIDBR [$]1" // arm64:"FRINTAD" // ppc64x:"FRIN" sink64[2] = math.Round(x) // amd64/v2:-".*x86HasSSE41" amd64/v3:-".*x86HasSSE41" - // amd64:"ROUNDSD\t[$]3" - // s390x:"FIDBR\t[$]5" + // amd64:"ROUNDSD [$]3" + // s390x:"FIDBR [$]5" // arm64:"FRINTZD" // ppc64x:"FRIZ" // wasm:"F64Trunc" sink64[3] = math.Trunc(x) // amd64/v2:-".*x86HasSSE41" amd64/v3:-".*x86HasSSE41" - // amd64:"ROUNDSD\t[$]0" - // s390x:"FIDBR\t[$]4" + // amd64:"ROUNDSD [$]0" + // s390x:"FIDBR [$]4" // arm64:"FRINTND" // wasm:"F64Nearest" sink64[4] = math.RoundToEven(x) @@ -78,43 +78,43 @@ func sqrt32(x float32) float32 { // Check that it's using integer registers func abs(x, y float64) { - // amd64:"BTRQ\t[$]63" - // arm64:"FABSD\t" - // loong64:"ABSD\t" - // s390x:"LPDFR\t",-"MOVD\t" (no integer load/store) - // ppc64x:"FABS\t" - // riscv64:"FABSD\t" + // amd64:"BTRQ [$]63" + // arm64:"FABSD " + // loong64:"ABSD " + // s390x:"LPDFR " -"MOVD " (no integer load/store) + // ppc64x:"FABS " + // riscv64:"FABSD " // wasm:"F64Abs" - // arm/6:"ABSD\t" - // mips64/hardfloat:"ABSD\t" - // mips/hardfloat:"ABSD\t" + // arm/6:"ABSD " + // mips64/hardfloat:"ABSD " + // mips/hardfloat:"ABSD " sink64[0] = math.Abs(x) - // amd64:"BTRQ\t[$]63","PXOR" (TODO: this should be BTSQ) - // s390x:"LNDFR\t",-"MOVD\t" (no integer load/store) - // ppc64x:"FNABS\t" + // amd64:"BTRQ [$]63" "PXOR" (TODO: this should be BTSQ) + // s390x:"LNDFR " -"MOVD " (no integer load/store) + // ppc64x:"FNABS " sink64[1] = -math.Abs(y) } // Check that it's using integer registers func abs32(x float32) float32 { - // s390x:"LPDFR",-"LDEBR",-"LEDBR" (no float64 conversion) + // s390x:"LPDFR" -"LDEBR" -"LEDBR" (no float64 conversion) return float32(math.Abs(float64(x))) } // Check that it's using integer registers func copysign(a, b, c float64) { - // amd64:"BTRQ\t[$]63","ANDQ","ORQ" + // amd64:"BTRQ [$]63" "ANDQ" "ORQ" // loong64:"FCOPYSGD" - // s390x:"CPSDR",-"MOVD" (no integer load/store) + // s390x:"CPSDR" -"MOVD" (no integer load/store) // ppc64x:"FCPSGN" // riscv64:"FSGNJD" // wasm:"F64Copysign" sink64[0] = math.Copysign(a, b) - // amd64:"BTSQ\t[$]63" + // amd64:"BTSQ [$]63" // loong64:"FCOPYSGD" - // s390x:"LNDFR\t",-"MOVD\t" (no integer load/store) + // s390x:"LNDFR " -"MOVD " (no integer load/store) // ppc64x:"FCPSGN" // riscv64:"FSGNJD" // arm64:"ORR", -"AND" @@ -122,12 +122,12 @@ func copysign(a, b, c float64) { // Like math.Copysign(c, -1), but with integer operations. Useful // for platforms that have a copysign opcode to see if it's detected. 
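// (ORing 1<<63 into the float64 bit pattern sets the IEEE-754 sign bit,
// so the line below forces a negative value without any floating-point ops.)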
- // s390x:"LNDFR\t",-"MOVD\t" (no integer load/store) + // s390x:"LNDFR " -"MOVD " (no integer load/store) sink64[2] = math.Float64frombits(math.Float64bits(a) | 1<<63) - // amd64:"ANDQ","ORQ" + // amd64:"ANDQ" "ORQ" // loong64:"FCOPYSGD" - // s390x:"CPSDR\t",-"MOVD\t" (no integer load/store) + // s390x:"CPSDR " -"MOVD " (no integer load/store) // ppc64x:"FCPSGN" // riscv64:"FSGNJD" sink64[3] = math.Copysign(-1, c) @@ -151,12 +151,12 @@ func fms(x, y, z float64) float64 { } func fnms(x, y, z float64) float64 { - // riscv64:"FNMSUBD",-"FNMADDD" + // riscv64:"FNMSUBD" -"FNMADDD" return math.FMA(-x, y, z) } func fnma(x, y, z float64) float64 { - // riscv64:"FNMADDD",-"FNMSUBD" + // riscv64:"FNMADDD" -"FNMSUBD" return math.FMA(x, -y, -z) } @@ -221,39 +221,39 @@ func isNotNegInfCmp(x float64) bool { } func fromFloat64(f64 float64) uint64 { - // amd64:"MOVQ\tX.*, [^X].*" - // arm64:"FMOVD\tF.*, R.*" - // loong64:"MOVV\tF.*, R.*" + // amd64:"MOVQ X.*, [^X].*" + // arm64:"FMOVD F.*, R.*" + // loong64:"MOVV F.*, R.*" // ppc64x:"MFVSRD" - // mips64/hardfloat:"MOVV\tF.*, R.*" + // mips64/hardfloat:"MOVV F.*, R.*" // riscv64:"FMVXD" return math.Float64bits(f64+1) + 1 } func fromFloat32(f32 float32) uint32 { - // amd64:"MOVL\tX.*, [^X].*" - // arm64:"FMOVS\tF.*, R.*" - // loong64:"MOVW\tF.*, R.*" - // mips64/hardfloat:"MOVW\tF.*, R.*" + // amd64:"MOVL X.*, [^X].*" + // arm64:"FMOVS F.*, R.*" + // loong64:"MOVW F.*, R.*" + // mips64/hardfloat:"MOVW F.*, R.*" // riscv64:"FMVXW" return math.Float32bits(f32+1) + 1 } func toFloat64(u64 uint64) float64 { - // amd64:"MOVQ\t[^X].*, X.*" - // arm64:"FMOVD\tR.*, F.*" - // loong64:"MOVV\tR.*, F.*" + // amd64:"MOVQ [^X].*, X.*" + // arm64:"FMOVD R.*, F.*" + // loong64:"MOVV R.*, F.*" // ppc64x:"MTVSRD" - // mips64/hardfloat:"MOVV\tR.*, F.*" + // mips64/hardfloat:"MOVV R.*, F.*" // riscv64:"FMVDX" return math.Float64frombits(u64+1) + 1 } func toFloat32(u32 uint32) float32 { - // amd64:"MOVL\t[^X].*, X.*" - // arm64:"FMOVS\tR.*, F.*" - // loong64:"MOVW\tR.*, F.*" - // mips64/hardfloat:"MOVW\tR.*, F.*" + // amd64:"MOVL [^X].*, X.*" + // arm64:"FMOVS R.*, F.*" + // loong64:"MOVW R.*, F.*" + // mips64/hardfloat:"MOVW R.*, F.*" // riscv64:"FMVWX" return math.Float32frombits(u32+1) + 1 } @@ -262,14 +262,14 @@ func toFloat32(u32 uint32) float32 { // are evaluated at compile-time func constantCheck64() bool { - // amd64:"(MOVB\t[$]0)|(XORL\t[A-Z][A-Z0-9]+, [A-Z][A-Z0-9]+)",-"FCMP",-"MOVB\t[$]1" - // s390x:"MOV(B|BZ|D)\t[$]0,",-"FCMPU",-"MOV(B|BZ|D)\t[$]1," + // amd64:"(MOVB [$]0)|(XORL [A-Z][A-Z0-9]+, [A-Z][A-Z0-9]+)" -"FCMP" -"MOVB [$]1" + // s390x:"MOV(B|BZ|D) [$]0," -"FCMPU" -"MOV(B|BZ|D) [$]1," return 0.5 == float64(uint32(1)) || 1.5 > float64(uint64(1<<63)) } func constantCheck32() bool { - // amd64:"MOV(B|L)\t[$]1",-"FCMP",-"MOV(B|L)\t[$]0" - // s390x:"MOV(B|BZ|D)\t[$]1,",-"FCMPU",-"MOV(B|BZ|D)\t[$]0," + // amd64:"MOV(B|L) [$]1" -"FCMP" -"MOV(B|L) [$]0" + // s390x:"MOV(B|BZ|D) [$]1," -"FCMPU" -"MOV(B|BZ|D) [$]0," return float32(0.5) <= float32(int64(1)) && float32(1.5) >= float32(int32(-1<<31)) } @@ -277,12 +277,12 @@ func constantCheck32() bool { // at compile-time func constantConvert32(x float32) float32 { - // amd64:"MOVSS\t[$]f32.3f800000\\(SB\\)" - // s390x:"FMOVS\t[$]f32.3f800000\\(SB\\)" - // ppc64x/power8:"FMOVS\t[$]f32.3f800000\\(SB\\)" - // ppc64x/power9:"FMOVS\t[$]f32.3f800000\\(SB\\)" - // ppc64x/power10:"XXSPLTIDP\t[$]1065353216, VS0" - // arm64:"FMOVS\t[$]\\(1.0\\)" + // amd64:"MOVSS [$]f32.3f800000\\(SB\\)" + // s390x:"FMOVS [$]f32.3f800000\\(SB\\)" 
+ // ppc64x/power8:"FMOVS [$]f32.3f800000\\(SB\\)" + // ppc64x/power9:"FMOVS [$]f32.3f800000\\(SB\\)" + // ppc64x/power10:"XXSPLTIDP [$]1065353216, VS0" + // arm64:"FMOVS [$]\\(1.0\\)" if x > math.Float32frombits(0x3f800000) { return -x } @@ -330,3 +330,48 @@ func nanGenerate32() float32 { // amd64/v3:"VFMADD231SS" return z0 + z1 } + +func outOfBoundsConv(i32 *[2]int32, u32 *[2]uint32, i64 *[2]int64, u64 *[2]uint64) { + // arm64: "FCVTZSDW" + // amd64: "CVTTSD2SL", "CVTSD2SS" + i32[0] = int32(two40()) + // arm64: "FCVTZSDW" + // amd64: "CVTTSD2SL", "CVTSD2SS" + i32[1] = int32(-two40()) + // arm64: "FCVTZSDW" + // amd64: "CVTTSD2SL", "CVTSD2SS" + u32[0] = uint32(two41()) + // on arm64, this uses an explicit <0 comparison, so it constant folds. + // on amd64, this uses an explicit <0 comparison, so it constant folds. + // amd64: "MOVL [$]0," + u32[1] = uint32(minus1()) + // arm64: "FCVTZSD" + // amd64: "CVTTSD2SQ" + i64[0] = int64(two80()) + // arm64: "FCVTZSD" + // amd64: "CVTTSD2SQ" + i64[1] = int64(-two80()) + // arm64: "FCVTZUD" + // amd64: "CVTTSD2SQ" + u64[0] = uint64(two81()) + // arm64: "FCVTZUD" + // on amd64, this uses an explicit <0 comparison, so it constant folds. + // amd64: "MOVQ [$]0," + u64[1] = uint64(minus1()) +} + +func two40() float64 { + return 1 << 40 +} +func two41() float64 { + return 1 << 41 +} +func two80() float64 { + return 1 << 80 +} +func two81() float64 { + return 1 << 81 +} +func minus1() float64 { + return -1 +} diff --git a/test/codegen/mathbits.go b/test/codegen/mathbits.go index ba5387d2c32..6f7014688ac 100644 --- a/test/codegen/mathbits.go +++ b/test/codegen/mathbits.go @@ -20,10 +20,10 @@ func LeadingZeros(n uint) int { // amd64/v3:"LZCNTQ", -"BSRQ" // arm64:"CLZ" // arm:"CLZ" - // loong64:"CLZV",-"SUB" + // loong64:"CLZV" -"SUB" // mips:"CLZ" // ppc64x:"CNTLZD" - // riscv64/rva22u64,riscv64/rva23u64:"CLZ\t",-"SUB" + // riscv64/rva22u64,riscv64/rva23u64:"CLZ " -"SUB" // s390x:"FLOGR" // wasm:"I64Clz" return bits.LeadingZeros(n) @@ -34,52 +34,52 @@ func LeadingZeros64(n uint64) int { // amd64/v3:"LZCNTQ", -"BSRQ" // arm:"CLZ" // arm64:"CLZ" - // loong64:"CLZV",-"SUB" + // loong64:"CLZV" -"SUB" // mips:"CLZ" // ppc64x:"CNTLZD" - // riscv64/rva22u64,riscv64/rva23u64:"CLZ\t",-"ADDI" + // riscv64/rva22u64,riscv64/rva23u64:"CLZ " -"ADDI" // s390x:"FLOGR" // wasm:"I64Clz" return bits.LeadingZeros64(n) } func LeadingZeros32(n uint32) int { - // amd64/v1,amd64/v2:"BSRQ","LEAQ",-"CMOVQEQ" - // amd64/v3: "LZCNTL",- "BSRL" + // amd64/v1,amd64/v2:"BSRQ" "LEAQ" -"CMOVQEQ" + // amd64/v3: "LZCNTL" - "BSRL" // arm:"CLZ" // arm64:"CLZW" - // loong64:"CLZW",-"SUB" + // loong64:"CLZW" -"SUB" // mips:"CLZ" // ppc64x:"CNTLZW" - // riscv64/rva22u64,riscv64/rva23u64:"CLZW",-"ADDI" + // riscv64/rva22u64,riscv64/rva23u64:"CLZW" -"ADDI" // s390x:"FLOGR" // wasm:"I64Clz" return bits.LeadingZeros32(n) } func LeadingZeros16(n uint16) int { - // amd64/v1,amd64/v2:"BSRL","LEAL",-"CMOVQEQ" - // amd64/v3: "LZCNTL",- "BSRL" + // amd64/v1,amd64/v2:"BSRL" "LEAL" -"CMOVQEQ" + // amd64/v3: "LZCNTL" - "BSRL" // arm64:"CLZ" // arm:"CLZ" // loong64:"CLZV" // mips:"CLZ" // ppc64x:"CNTLZD" - // riscv64/rva22u64,riscv64/rva23u64:"CLZ\t","ADDI\t\\$-48",-"NEG" + // riscv64/rva22u64,riscv64/rva23u64:"CLZ " "ADDI [$]-48" -"NEG" // s390x:"FLOGR" // wasm:"I64Clz" return bits.LeadingZeros16(n) } func LeadingZeros8(n uint8) int { - // amd64/v1,amd64/v2:"BSRL","LEAL",-"CMOVQEQ" - // amd64/v3: "LZCNTL",- "BSRL" + // amd64/v1,amd64/v2:"BSRL" "LEAL" -"CMOVQEQ" + // amd64/v3: "LZCNTL" - "BSRL" // arm64:"CLZ" 
// arm:"CLZ" // loong64:"CLZV" // mips:"CLZ" // ppc64x:"CNTLZD" - // riscv64/rva22u64,riscv64/rva23u64:"CLZ\t","ADDI\t\\$-56",-"NEG" + // riscv64/rva22u64,riscv64/rva23u64:"CLZ " "ADDI [$]-56" -"NEG" // s390x:"FLOGR" // wasm:"I64Clz" return bits.LeadingZeros8(n) @@ -96,8 +96,8 @@ func Len(n uint) int { // arm:"CLZ" // loong64:"CLZV" // mips:"CLZ" - // ppc64x:"SUBC","CNTLZD" - // riscv64/rva22u64,riscv64/rva23u64:"CLZ\t","ADDI\t\\$-64" + // ppc64x:"SUBC" "CNTLZD" + // riscv64/rva22u64,riscv64/rva23u64:"CLZ " "ADDI [$]-64" // s390x:"FLOGR" // wasm:"I64Clz" return bits.Len(n) @@ -110,62 +110,62 @@ func Len64(n uint64) int { // arm:"CLZ" // loong64:"CLZV" // mips:"CLZ" - // ppc64x:"SUBC","CNTLZD" - // riscv64/rva22u64,riscv64/rva23u64:"CLZ\t","ADDI\t\\$-64" + // ppc64x:"SUBC" "CNTLZD" + // riscv64/rva22u64,riscv64/rva23u64:"CLZ " "ADDI [$]-64" // s390x:"FLOGR" // wasm:"I64Clz" return bits.Len64(n) } func SubFromLen64(n uint64) int { - // loong64:"CLZV",-"ADD" - // ppc64x:"CNTLZD",-"SUBC" - // riscv64/rva22u64,riscv64/rva23u64:"CLZ\t",-"ADDI",-"NEG" + // loong64:"CLZV" -"ADD" + // ppc64x:"CNTLZD" -"SUBC" + // riscv64/rva22u64,riscv64/rva23u64:"CLZ " -"ADDI" -"NEG" return 64 - bits.Len64(n) } func CompareWithLen64(n uint64) bool { - // loong64:"CLZV",-"ADD",-"[$]64",-"[$]9" + // loong64:"CLZV" -"ADD" -"[$]64" -"[$]9" return bits.Len64(n) < 9 } func Len32(n uint32) int { - // amd64/v1,amd64/v2:"BSRQ","LEAQ",-"CMOVQEQ" + // amd64/v1,amd64/v2:"BSRQ" "LEAQ" -"CMOVQEQ" // amd64/v3: "LZCNTL" // arm64:"CLZ" // arm:"CLZ" // loong64:"CLZW" // mips:"CLZ" // ppc64x: "CNTLZW" - // riscv64/rva22u64,riscv64/rva23u64:"CLZW","ADDI\t\\$-32" + // riscv64/rva22u64,riscv64/rva23u64:"CLZW" "ADDI [$]-32" // s390x:"FLOGR" // wasm:"I64Clz" return bits.Len32(n) } func Len16(n uint16) int { - // amd64/v1,amd64/v2:"BSRL","LEAL",-"CMOVQEQ" + // amd64/v1,amd64/v2:"BSRL" "LEAL" -"CMOVQEQ" // amd64/v3: "LZCNTL" // arm64:"CLZ" // arm:"CLZ" // loong64:"CLZV" // mips:"CLZ" - // ppc64x:"SUBC","CNTLZD" - // riscv64/rva22u64,riscv64/rva23u64:"CLZ\t","ADDI\t\\$-64" + // ppc64x:"SUBC" "CNTLZD" + // riscv64/rva22u64,riscv64/rva23u64:"CLZ " "ADDI [$]-64" // s390x:"FLOGR" // wasm:"I64Clz" return bits.Len16(n) } func Len8(n uint8) int { - // amd64/v1,amd64/v2:"BSRL","LEAL",-"CMOVQEQ" + // amd64/v1,amd64/v2:"BSRL" "LEAL" -"CMOVQEQ" // amd64/v3: "LZCNTL" // arm64:"CLZ" // arm:"CLZ" // loong64:"CLZV" // mips:"CLZ" - // ppc64x:"SUBC","CNTLZD" - // riscv64/rva22u64,riscv64/rva23u64:"CLZ\t","ADDI\t\\$-64" + // ppc64x:"SUBC" "CNTLZD" + // riscv64/rva22u64,riscv64/rva23u64:"CLZ " "ADDI [$]-64" // s390x:"FLOGR" // wasm:"I64Clz" return bits.Len8(n) @@ -179,10 +179,10 @@ func Len8(n uint8) int { func OnesCount(n uint) int { // amd64/v2:-".*x86HasPOPCNT" amd64/v3:-".*x86HasPOPCNT" // amd64:"POPCNTQ" - // arm64:"VCNT","VUADDLV" + // arm64:"VCNT" "VUADDLV" // loong64:"VPCNTV" // ppc64x:"POPCNTD" - // riscv64:"CPOP\t" + // riscv64:"CPOP " // s390x:"POPCNT" // wasm:"I64Popcnt" return bits.OnesCount(n) @@ -191,10 +191,10 @@ func OnesCount(n uint) int { func OnesCount64(n uint64) int { // amd64/v2:-".*x86HasPOPCNT" amd64/v3:-".*x86HasPOPCNT" // amd64:"POPCNTQ" - // arm64:"VCNT","VUADDLV" + // arm64:"VCNT" "VUADDLV" // loong64:"VPCNTV" // ppc64x:"POPCNTD" - // riscv64:"CPOP\t" + // riscv64:"CPOP " // s390x:"POPCNT" // wasm:"I64Popcnt" return bits.OnesCount64(n) @@ -203,7 +203,7 @@ func OnesCount64(n uint64) int { func OnesCount32(n uint32) int { // amd64/v2:-".*x86HasPOPCNT" amd64/v3:-".*x86HasPOPCNT" // amd64:"POPCNTL" - // arm64:"VCNT","VUADDLV" + // 
arm64:"VCNT" "VUADDLV" // loong64:"VPCNTW" // ppc64x:"POPCNTW" // riscv64:"CPOPW" @@ -215,10 +215,10 @@ func OnesCount32(n uint32) int { func OnesCount16(n uint16) int { // amd64/v2:-".*x86HasPOPCNT" amd64/v3:-".*x86HasPOPCNT" // amd64:"POPCNTL" - // arm64:"VCNT","VUADDLV" + // arm64:"VCNT" "VUADDLV" // loong64:"VPCNTH" // ppc64x:"POPCNTW" - // riscv64:"CPOP\t" + // riscv64:"CPOP " // s390x:"POPCNT" // wasm:"I64Popcnt" return bits.OnesCount16(n) @@ -226,7 +226,7 @@ func OnesCount16(n uint16) int { func OnesCount8(n uint8) int { // ppc64x:"POPCNTB" - // riscv64/rva22u64,riscv64/rva23u64:"CPOP\t" + // riscv64/rva22u64,riscv64/rva23u64:"CPOP " // s390x:"POPCNT" // wasm:"I64Popcnt" return bits.OnesCount8(n) @@ -252,7 +252,7 @@ func Reverse32(n uint32) uint32 { } func Reverse16(n uint16) uint16 { - // loong64:"BITREV4B","REVB2H" + // loong64:"BITREV4B" "REVB2H" return bits.Reverse16(n) } @@ -292,20 +292,20 @@ func ReverseBytes32(n uint32) uint32 { // arm64:"REVW" // loong64:"REVB2W" // ppc64x/power10: "BRW" - // riscv64/rva22u64,riscv64/rva23u64:"REV8","SRLI\t\\$32" + // riscv64/rva22u64,riscv64/rva23u64:"REV8" "SRLI [$]32" // s390x:"MOVWBR" return bits.ReverseBytes32(n) } func ReverseBytes16(n uint16) uint16 { // amd64:"ROLW" - // arm/5:"SLL","SRL","ORR" + // arm/5:"SLL" "SRL" "ORR" // arm/6:"REV16" // arm/7:"REV16" - // arm64:"REV16W",-"UBFX",-"ORR" + // arm64:"REV16W" -"UBFX" -"ORR" // loong64:"REVB2H" // ppc64x/power10: "BRH" - // riscv64/rva22u64,riscv64/rva23u64:"REV8","SRLI\t\\$48" + // riscv64/rva22u64,riscv64/rva23u64:"REV8" "SRLI [$]48" return bits.ReverseBytes16(n) } @@ -319,16 +319,16 @@ func RotateLeft64(n uint64) uint64 { // loong64:"ROTRV" // ppc64x:"ROTL" // riscv64:"RORI" - // s390x:"RISBGZ\t[$]0, [$]63, [$]37, " + // s390x:"RISBGZ [$]0, [$]63, [$]37, " // wasm:"I64Rotl" return bits.RotateLeft64(n, 37) } func RotateLeft32(n uint32) uint32 { // amd64:"ROLL" 386:"ROLL" - // arm:`MOVW\tR[0-9]+@>23` + // arm:`MOVW R[0-9]+@>23` // arm64:"RORW" - // loong64:"ROTR\t" + // loong64:"ROTR " // ppc64x:"ROTLW" // riscv64:"RORIW" // s390x:"RLL" @@ -338,15 +338,15 @@ func RotateLeft32(n uint32) uint32 { func RotateLeft16(n uint16, s int) uint16 { // amd64:"ROLW" 386:"ROLW" - // arm64:"RORW",-"CSEL" - // loong64:"ROTR\t","SLLV" + // arm64:"RORW" -"CSEL" + // loong64:"ROTR " "SLLV" return bits.RotateLeft16(n, s) } func RotateLeft8(n uint8, s int) uint8 { // amd64:"ROLB" 386:"ROLB" - // arm64:"LSL","LSR",-"CSEL" - // loong64:"OR","SLLV","SRLV" + // arm64:"LSL" "LSR" -"CSEL" + // loong64:"OR" "SLLV" "SRLV" return bits.RotateLeft8(n, s) } @@ -373,10 +373,10 @@ func RotateLeftVariable64(n uint64, m int) uint64 { } func RotateLeftVariable32(n uint32, m int) uint32 { - // arm:`MOVW\tR[0-9]+@>R[0-9]+` + // arm:`MOVW R[0-9]+@>R[0-9]+` // amd64:"ROLL" // arm64:"RORW" - // loong64:"ROTR\t" + // loong64:"ROTR " // ppc64x:"ROTLW" // riscv64:"ROLW" // s390x:"RLL" @@ -390,78 +390,78 @@ func RotateLeftVariable32(n uint32, m int) uint32 { func TrailingZeros(n uint) int { // 386:"BSFL" - // amd64/v1,amd64/v2:"BSFQ","MOVL\t\\$64","CMOVQEQ" + // amd64/v1,amd64/v2:"BSFQ" "MOVL [$]64" "CMOVQEQ" // amd64/v3:"TZCNTQ" // arm:"CLZ" - // arm64:"RBIT","CLZ" + // arm64:"RBIT" "CLZ" // loong64:"CTZV" - // ppc64x/power8:"ANDN","POPCNTD" + // ppc64x/power8:"ANDN" "POPCNTD" // ppc64x/power9: "CNTTZD" - // riscv64/rva22u64,riscv64/rva23u64: "CTZ\t" + // riscv64/rva22u64,riscv64/rva23u64: "CTZ " // s390x:"FLOGR" // wasm:"I64Ctz" return bits.TrailingZeros(n) } func TrailingZeros64(n uint64) int { - // 386:"BSFL","JNE" - // 
amd64/v1,amd64/v2:"BSFQ","MOVL\t\\$64","CMOVQEQ" + // 386:"BSFL" "JNE" + // amd64/v1,amd64/v2:"BSFQ" "MOVL [$]64" "CMOVQEQ" // amd64/v3:"TZCNTQ" - // arm64:"RBIT","CLZ" + // arm64:"RBIT" "CLZ" // loong64:"CTZV" - // ppc64x/power8:"ANDN","POPCNTD" + // ppc64x/power8:"ANDN" "POPCNTD" // ppc64x/power9: "CNTTZD" - // riscv64/rva22u64,riscv64/rva23u64: "CTZ\t" + // riscv64/rva22u64,riscv64/rva23u64: "CTZ " // s390x:"FLOGR" // wasm:"I64Ctz" return bits.TrailingZeros64(n) } func TrailingZeros64Subtract(n uint64) int { - // ppc64x/power8:"NEG","SUBC","ANDN","POPCNTD" - // ppc64x/power9:"SUBC","CNTTZD" + // ppc64x/power8:"NEG" "SUBC" "ANDN" "POPCNTD" + // ppc64x/power9:"SUBC" "CNTTZD" return bits.TrailingZeros64(1 - n) } func TrailingZeros32(n uint32) int { // 386:"BSFL" - // amd64/v1,amd64/v2:"BTSQ\\t\\$32","BSFQ" + // amd64/v1,amd64/v2:"BTSQ [$]32" "BSFQ" // amd64/v3:"TZCNTL" // arm:"CLZ" - // arm64:"RBITW","CLZW" + // arm64:"RBITW" "CLZW" // loong64:"CTZW" - // ppc64x/power8:"ANDN","POPCNTW" + // ppc64x/power8:"ANDN" "POPCNTW" // ppc64x/power9: "CNTTZW" // riscv64/rva22u64,riscv64/rva23u64: "CTZW" - // s390x:"FLOGR","MOVWZ" + // s390x:"FLOGR" "MOVWZ" // wasm:"I64Ctz" return bits.TrailingZeros32(n) } func TrailingZeros16(n uint16) int { - // 386:"BSFL\t" - // amd64:"BSFL","ORL\\t\\$65536" - // arm:"ORR\t\\$65536","CLZ",-"MOVHU\tR" - // arm64:"ORR\t\\$65536","RBITW","CLZW",-"MOVHU\tR",-"RBIT\t",-"CLZ\t" + // 386:"BSFL " + // amd64:"BSFL" "ORL [$]65536" + // arm:"ORR [$]65536" "CLZ" -"MOVHU R" + // arm64:"ORR [$]65536" "RBITW" "CLZW" -"MOVHU R" -"RBIT " -"CLZ " // loong64:"CTZV" - // ppc64x/power8:"POPCNTW","ADD\t\\$-1" - // ppc64x/power9:"CNTTZD","ORIS\\t\\$1" - // riscv64/rva22u64,riscv64/rva23u64: "ORI\t\\$65536","CTZW" - // s390x:"FLOGR","OR\t\\$65536" + // ppc64x/power8:"POPCNTW" "ADD [$]-1" + // ppc64x/power9:"CNTTZD" "ORIS [$]1" + // riscv64/rva22u64,riscv64/rva23u64: "ORI [$]65536" "CTZW" + // s390x:"FLOGR" "OR [$]65536" // wasm:"I64Ctz" return bits.TrailingZeros16(n) } func TrailingZeros8(n uint8) int { // 386:"BSFL" - // amd64:"BSFL","ORL\\t\\$256" - // arm:"ORR\t\\$256","CLZ",-"MOVBU\tR" - // arm64:"ORR\t\\$256","RBITW","CLZW",-"MOVBU\tR",-"RBIT\t",-"CLZ\t" + // amd64:"BSFL" "ORL [$]256" + // arm:"ORR [$]256" "CLZ" -"MOVBU R" + // arm64:"ORR [$]256" "RBITW" "CLZW" -"MOVBU R" -"RBIT " -"CLZ " // loong64:"CTZV" - // ppc64x/power8:"POPCNTB","ADD\t\\$-1" - // ppc64x/power9:"CNTTZD","OR\t\\$256" - // riscv64/rva22u64,riscv64/rva23u64: "ORI\t\\$256","CTZW" - // s390x:"FLOGR","OR\t\\$256" + // ppc64x/power8:"POPCNTB" "ADD [$]-1" + // ppc64x/power9:"CNTTZD" "OR [$]256" + // riscv64/rva22u64,riscv64/rva23u64: "ORI [$]256" "CTZW" + // s390x:"FLOGR" "OR [$]256" // wasm:"I64Ctz" return bits.TrailingZeros8(n) } @@ -471,7 +471,7 @@ func TrailingZeros8(n uint8) int { func IterateBits(n uint) int { i := 0 for n != 0 { - // amd64/v1,amd64/v2:"BSFQ",-"CMOVEQ" + // amd64/v1,amd64/v2:"BSFQ" -"CMOVEQ" // amd64/v3:"TZCNTQ" i += bits.TrailingZeros(n) n &= n - 1 @@ -482,9 +482,9 @@ func IterateBits(n uint) int { func IterateBits64(n uint64) int { i := 0 for n != 0 { - // amd64/v1,amd64/v2:"BSFQ",-"CMOVEQ" + // amd64/v1,amd64/v2:"BSFQ" -"CMOVEQ" // amd64/v3:"TZCNTQ" - // riscv64/rva22u64,riscv64/rva23u64: "CTZ\t" + // riscv64/rva22u64,riscv64/rva23u64: "CTZ " i += bits.TrailingZeros64(n) n &= n - 1 } @@ -494,9 +494,9 @@ func IterateBits64(n uint64) int { func IterateBits32(n uint32) int { i := 0 for n != 0 { - // amd64/v1,amd64/v2:"BSFL",-"BTSQ" + // amd64/v1,amd64/v2:"BSFL" -"BTSQ" // amd64/v3:"TZCNTL" - // 
riscv64/rva22u64,riscv64/rva23u64: "CTZ\t" + // riscv64/rva22u64,riscv64/rva23u64: "CTZ " i += bits.TrailingZeros32(n) n &= n - 1 } @@ -506,10 +506,10 @@ func IterateBits32(n uint32) int { func IterateBits16(n uint16) int { i := 0 for n != 0 { - // amd64/v1,amd64/v2:"BSFL",-"BTSL" + // amd64/v1,amd64/v2:"BSFL" -"BTSL" // amd64/v3:"TZCNTL" - // arm64:"RBITW","CLZW",-"ORR" - // riscv64/rva22u64,riscv64/rva23u64: "CTZ\t",-"ORR" + // arm64:"RBITW" "CLZW" -"ORR" + // riscv64/rva22u64,riscv64/rva23u64: "CTZ " -"ORR" i += bits.TrailingZeros16(n) n &= n - 1 } @@ -519,10 +519,10 @@ func IterateBits16(n uint16) int { func IterateBits8(n uint8) int { i := 0 for n != 0 { - // amd64/v1,amd64/v2:"BSFL",-"BTSL" + // amd64/v1,amd64/v2:"BSFL" -"BTSL" // amd64/v3:"TZCNTL" - // arm64:"RBITW","CLZW",-"ORR" - // riscv64/rva22u64,riscv64/rva23u64: "CTZ\t",-"ORR" + // arm64:"RBITW" "CLZW" -"ORR" + // riscv64/rva22u64,riscv64/rva23u64: "CTZ " -"ORR" i += bits.TrailingZeros8(n) n &= n - 1 } @@ -534,44 +534,44 @@ func IterateBits8(n uint8) int { // --------------- // func Add(x, y, ci uint) (r, co uint) { - // arm64:"ADDS","ADCS","ADC",-"ADD\t",-"CMP" - // amd64:"NEGL","ADCQ","SBBQ","NEGQ" + // arm64:"ADDS" "ADCS" "ADC" -"ADD " -"CMP" + // amd64:"NEGL" "ADCQ" "SBBQ" "NEGQ" // ppc64x: "ADDC", "ADDE", "ADDZE" - // s390x:"ADDE","ADDC\t[$]-1," - // riscv64: "ADD","SLTU" + // s390x:"ADDE" "ADDC [$]-1," + // riscv64: "ADD" "SLTU" return bits.Add(x, y, ci) } func AddC(x, ci uint) (r, co uint) { - // arm64:"ADDS","ADCS","ADC",-"ADD\t",-"CMP" - // amd64:"NEGL","ADCQ","SBBQ","NEGQ" + // arm64:"ADDS" "ADCS" "ADC" -"ADD " -"CMP" + // amd64:"NEGL" "ADCQ" "SBBQ" "NEGQ" // loong64: "ADDV", "SGTU" // ppc64x: "ADDC", "ADDE", "ADDZE" - // s390x:"ADDE","ADDC\t[$]-1," - // mips64:"ADDV","SGTU" - // riscv64: "ADD","SLTU" + // s390x:"ADDE" "ADDC [$]-1," + // mips64:"ADDV" "SGTU" + // riscv64: "ADD" "SLTU" return bits.Add(x, 7, ci) } func AddZ(x, y uint) (r, co uint) { - // arm64:"ADDS","ADC",-"ADCS",-"ADD\t",-"CMP" - // amd64:"ADDQ","SBBQ","NEGQ",-"NEGL",-"ADCQ" + // arm64:"ADDS" "ADC" -"ADCS" -"ADD " -"CMP" + // amd64:"ADDQ" "SBBQ" "NEGQ" -"NEGL" -"ADCQ" // loong64: "ADDV", "SGTU" // ppc64x: "ADDC", -"ADDE", "ADDZE" - // s390x:"ADDC",-"ADDC\t[$]-1," - // mips64:"ADDV","SGTU" - // riscv64: "ADD","SLTU" + // s390x:"ADDC" -"ADDC [$]-1," + // mips64:"ADDV" "SGTU" + // riscv64: "ADD" "SLTU" return bits.Add(x, y, 0) } func AddR(x, y, ci uint) uint { - // arm64:"ADDS","ADCS",-"ADD\t",-"CMP" - // amd64:"NEGL","ADCQ",-"SBBQ",-"NEGQ" + // arm64:"ADDS" "ADCS" -"ADD " -"CMP" + // amd64:"NEGL" "ADCQ" -"SBBQ" -"NEGQ" // loong64: "ADDV", -"SGTU" // ppc64x: "ADDC", "ADDE", -"ADDZE" - // s390x:"ADDE","ADDC\t[$]-1," - // mips64:"ADDV",-"SGTU" - // riscv64: "ADD",-"SLTU" + // s390x:"ADDE" "ADDC [$]-1," + // mips64:"ADDV" -"SGTU" + // riscv64: "ADD" -"SLTU" r, _ := bits.Add(x, y, ci) return r } @@ -579,54 +579,54 @@ func AddR(x, y, ci uint) uint { func AddM(p, q, r *[3]uint) { var c uint r[0], c = bits.Add(p[0], q[0], c) - // arm64:"ADCS",-"ADD\t",-"CMP" - // amd64:"ADCQ",-"NEGL",-"SBBQ",-"NEGQ" - // s390x:"ADDE",-"ADDC\t[$]-1," + // arm64:"ADCS" -"ADD " -"CMP" + // amd64:"ADCQ" -"NEGL" -"SBBQ" -"NEGQ" + // s390x:"ADDE" -"ADDC [$]-1," r[1], c = bits.Add(p[1], q[1], c) r[2], c = bits.Add(p[2], q[2], c) } func Add64(x, y, ci uint64) (r, co uint64) { - // arm64:"ADDS","ADCS","ADC",-"ADD\t",-"CMP" - // amd64:"NEGL","ADCQ","SBBQ","NEGQ" + // arm64:"ADDS" "ADCS" "ADC" -"ADD " -"CMP" + // amd64:"NEGL" "ADCQ" "SBBQ" "NEGQ" // loong64: "ADDV", "SGTU" // ppc64x: 
"ADDC", "ADDE", "ADDZE" - // s390x:"ADDE","ADDC\t[$]-1," - // mips64:"ADDV","SGTU" - // riscv64: "ADD","SLTU" + // s390x:"ADDE" "ADDC [$]-1," + // mips64:"ADDV" "SGTU" + // riscv64: "ADD" "SLTU" return bits.Add64(x, y, ci) } func Add64C(x, ci uint64) (r, co uint64) { - // arm64:"ADDS","ADCS","ADC",-"ADD\t",-"CMP" - // amd64:"NEGL","ADCQ","SBBQ","NEGQ" + // arm64:"ADDS" "ADCS" "ADC" -"ADD " -"CMP" + // amd64:"NEGL" "ADCQ" "SBBQ" "NEGQ" // loong64: "ADDV", "SGTU" // ppc64x: "ADDC", "ADDE", "ADDZE" - // s390x:"ADDE","ADDC\t[$]-1," - // mips64:"ADDV","SGTU" - // riscv64: "ADD","SLTU" + // s390x:"ADDE" "ADDC [$]-1," + // mips64:"ADDV" "SGTU" + // riscv64: "ADD" "SLTU" return bits.Add64(x, 7, ci) } func Add64Z(x, y uint64) (r, co uint64) { - // arm64:"ADDS","ADC",-"ADCS",-"ADD\t",-"CMP" - // amd64:"ADDQ","SBBQ","NEGQ",-"NEGL",-"ADCQ" + // arm64:"ADDS" "ADC" -"ADCS" -"ADD " -"CMP" + // amd64:"ADDQ" "SBBQ" "NEGQ" -"NEGL" -"ADCQ" // loong64: "ADDV", "SGTU" // ppc64x: "ADDC", -"ADDE", "ADDZE" - // s390x:"ADDC",-"ADDC\t[$]-1," - // mips64:"ADDV","SGTU" - // riscv64: "ADD","SLTU" + // s390x:"ADDC" -"ADDC [$]-1," + // mips64:"ADDV" "SGTU" + // riscv64: "ADD" "SLTU" return bits.Add64(x, y, 0) } func Add64R(x, y, ci uint64) uint64 { - // arm64:"ADDS","ADCS",-"ADD\t",-"CMP" - // amd64:"NEGL","ADCQ",-"SBBQ",-"NEGQ" + // arm64:"ADDS" "ADCS" -"ADD " -"CMP" + // amd64:"NEGL" "ADCQ" -"SBBQ" -"NEGQ" // loong64: "ADDV", -"SGTU" // ppc64x: "ADDC", "ADDE", -"ADDZE" - // s390x:"ADDE","ADDC\t[$]-1," - // mips64:"ADDV",-"SGTU" - // riscv64: "ADD",-"SLTU" + // s390x:"ADDE" "ADDC [$]-1," + // mips64:"ADDV" -"SGTU" + // riscv64: "ADD" -"SLTU" r, _ := bits.Add64(x, y, ci) return r } @@ -634,10 +634,10 @@ func Add64R(x, y, ci uint64) uint64 { func Add64M(p, q, r *[3]uint64) { var c uint64 r[0], c = bits.Add64(p[0], q[0], c) - // arm64:"ADCS",-"ADD\t",-"CMP" - // amd64:"ADCQ",-"NEGL",-"SBBQ",-"NEGQ" + // arm64:"ADCS" -"ADD " -"CMP" + // amd64:"ADCQ" -"NEGL" -"SBBQ" -"NEGQ" // ppc64x: -"ADDC", "ADDE", -"ADDZE" - // s390x:"ADDE",-"ADDC\t[$]-1," + // s390x:"ADDE" -"ADDC [$]-1," r[1], c = bits.Add64(p[1], q[1], c) r[2], c = bits.Add64(p[2], q[2], c) } @@ -645,22 +645,22 @@ func Add64M(p, q, r *[3]uint64) { func Add64M0(p, q, r *[3]uint64) { var c uint64 r[0], c = bits.Add64(p[0], q[0], 0) - // ppc64x: -"ADDC", -"ADDE", "ADDZE\tR[1-9]" + // ppc64x: -"ADDC", -"ADDE", "ADDZE R[1-9]" r[1], c = bits.Add64(p[1], 0, c) // ppc64x: -"ADDC", "ADDE", -"ADDZE" r[2], c = bits.Add64(p[2], p[2], c) } func Add64MSaveC(p, q, r, c *[2]uint64) { - // ppc64x: "ADDC\tR", "ADDZE" + // ppc64x: "ADDC R", "ADDZE" r[0], c[0] = bits.Add64(p[0], q[0], 0) - // ppc64x: "ADDC\t[$]-1", "ADDE", "ADDZE" + // ppc64x: "ADDC [$]-1", "ADDE", "ADDZE" r[1], c[1] = bits.Add64(p[1], q[1], c[0]) } func Add64PanicOnOverflowEQ(a, b uint64) uint64 { r, c := bits.Add64(a, b, 0) - // s390x:"BRC\t[$]3,",-"ADDE" + // s390x:"BRC [$]3," -"ADDE" if c == 1 { panic("overflow") } @@ -669,7 +669,7 @@ func Add64PanicOnOverflowEQ(a, b uint64) uint64 { func Add64PanicOnOverflowNE(a, b uint64) uint64 { r, c := bits.Add64(a, b, 0) - // s390x:"BRC\t[$]3,",-"ADDE" + // s390x:"BRC [$]3," -"ADDE" if c != 0 { panic("overflow") } @@ -678,7 +678,7 @@ func Add64PanicOnOverflowNE(a, b uint64) uint64 { func Add64PanicOnOverflowGT(a, b uint64) uint64 { r, c := bits.Add64(a, b, 0) - // s390x:"BRC\t[$]3,",-"ADDE" + // s390x:"BRC [$]3," -"ADDE" if c > 0 { panic("overflow") } @@ -690,7 +690,7 @@ func Add64MPanicOnOverflowEQ(a, b [2]uint64) [2]uint64 { var c uint64 r[0], c = bits.Add64(a[0], b[0], c) 
r[1], c = bits.Add64(a[1], b[1], c) - // s390x:"BRC\t[$]3," + // s390x:"BRC [$]3," if c == 1 { panic("overflow") } @@ -702,7 +702,7 @@ func Add64MPanicOnOverflowNE(a, b [2]uint64) [2]uint64 { var c uint64 r[0], c = bits.Add64(a[0], b[0], c) r[1], c = bits.Add64(a[1], b[1], c) - // s390x:"BRC\t[$]3," + // s390x:"BRC [$]3," if c != 0 { panic("overflow") } @@ -714,7 +714,7 @@ func Add64MPanicOnOverflowGT(a, b [2]uint64) [2]uint64 { var c uint64 r[0], c = bits.Add64(a[0], b[0], c) r[1], c = bits.Add64(a[1], b[1], c) - // s390x:"BRC\t[$]3," + // s390x:"BRC [$]3," if c > 0 { panic("overflow") } @@ -731,23 +731,24 @@ func Add64MPanicOnOverflowGT(a, b [2]uint64) [2]uint64 { // // This is what happened on PPC64 when compiling // crypto/internal/edwards25519/field.feMulGeneric. -func Add64MultipleChains(a, b, c, d [2]uint64) { +func Add64MultipleChains(a, b, c, d [2]uint64) [2]uint64 { var cx, d1, d2 uint64 a1, a2 := a[0], a[1] b1, b2 := b[0], b[1] c1, c2 := c[0], c[1] - // ppc64x: "ADDC\tR\\d+,", -"ADDE", -"MOVD\tXER" + // ppc64x: "ADDC R\\d+,", -"ADDE", -"MOVD XER" d1, cx = bits.Add64(a1, b1, 0) - // ppc64x: "ADDE", -"ADDC", -"MOVD\t.*, XER" + // ppc64x: "ADDE", -"ADDC", -"MOVD .*, XER" d2, _ = bits.Add64(a2, b2, cx) - // ppc64x: "ADDC\tR\\d+,", -"ADDE", -"MOVD\tXER" + // ppc64x: "ADDC R\\d+,", -"ADDE", -"MOVD XER" d1, cx = bits.Add64(c1, d1, 0) - // ppc64x: "ADDE", -"ADDC", -"MOVD\t.*, XER" + // ppc64x: "ADDE", -"ADDC", -"MOVD .*, XER" d2, _ = bits.Add64(c2, d2, cx) d[0] = d1 d[1] = d2 + return d } // --------------- // @@ -755,53 +756,53 @@ func Add64MultipleChains(a, b, c, d [2]uint64) { // --------------- // func Sub(x, y, ci uint) (r, co uint) { - // amd64:"NEGL","SBBQ","NEGQ" - // arm64:"NEGS","SBCS","NGC","NEG",-"ADD",-"SUB",-"CMP" - // loong64:"SUBV","SGTU" + // amd64:"NEGL" "SBBQ" "NEGQ" + // arm64:"NEGS" "SBCS" "NGC" "NEG" -"ADD" -"SUB" -"CMP" + // loong64:"SUBV" "SGTU" // ppc64x:"SUBC", "SUBE", "SUBZE", "NEG" // s390x:"SUBE" - // mips64:"SUBV","SGTU" - // riscv64: "SUB","SLTU" + // mips64:"SUBV" "SGTU" + // riscv64: "SUB" "SLTU" return bits.Sub(x, y, ci) } func SubC(x, ci uint) (r, co uint) { - // amd64:"NEGL","SBBQ","NEGQ" - // arm64:"NEGS","SBCS","NGC","NEG",-"ADD",-"SUB",-"CMP" - // loong64:"SUBV","SGTU" + // amd64:"NEGL" "SBBQ" "NEGQ" + // arm64:"NEGS" "SBCS" "NGC" "NEG" -"ADD" -"SUB" -"CMP" + // loong64:"SUBV" "SGTU" // ppc64x:"SUBC", "SUBE", "SUBZE", "NEG" // s390x:"SUBE" - // mips64:"SUBV","SGTU" - // riscv64: "SUB","SLTU" + // mips64:"SUBV" "SGTU" + // riscv64: "SUB" "SLTU" return bits.Sub(x, 7, ci) } func SubZ(x, y uint) (r, co uint) { - // amd64:"SUBQ","SBBQ","NEGQ",-"NEGL" - // arm64:"SUBS","NGC","NEG",-"SBCS",-"ADD",-"SUB\t",-"CMP" - // loong64:"SUBV","SGTU" + // amd64:"SUBQ" "SBBQ" "NEGQ" -"NEGL" + // arm64:"SUBS" "NGC" "NEG" -"SBCS" -"ADD" -"SUB " -"CMP" + // loong64:"SUBV" "SGTU" // ppc64x:"SUBC", -"SUBE", "SUBZE", "NEG" // s390x:"SUBC" - // mips64:"SUBV","SGTU" - // riscv64: "SUB","SLTU" + // mips64:"SUBV" "SGTU" + // riscv64: "SUB" "SLTU" return bits.Sub(x, y, 0) } func SubR(x, y, ci uint) uint { - // amd64:"NEGL","SBBQ",-"NEGQ" - // arm64:"NEGS","SBCS",-"NGC",-"NEG\t",-"ADD",-"SUB",-"CMP" - // loong64:"SUBV",-"SGTU" + // amd64:"NEGL" "SBBQ" -"NEGQ" + // arm64:"NEGS" "SBCS" -"NGC" -"NEG " -"ADD" -"SUB" -"CMP" + // loong64:"SUBV" -"SGTU" // ppc64x:"SUBC", "SUBE", -"SUBZE", -"NEG" // s390x:"SUBE" - // riscv64: "SUB",-"SLTU" + // riscv64: "SUB" -"SLTU" r, _ := bits.Sub(x, y, ci) return r } func SubM(p, q, r *[3]uint) { var c uint r[0], c = bits.Sub(p[0], q[0], c) - // 
amd64:"SBBQ",-"NEGL",-"NEGQ" - // arm64:"SBCS",-"NEGS",-"NGC",-"NEG",-"ADD",-"SUB",-"CMP" + // amd64:"SBBQ" -"NEGL" -"NEGQ" + // arm64:"SBCS" -"NEGS" -"NGC" -"NEG" -"ADD" -"SUB" -"CMP" // ppc64x:-"SUBC", "SUBE", -"SUBZE", -"NEG" // s390x:"SUBE" r[1], c = bits.Sub(p[1], q[1], c) @@ -809,68 +810,68 @@ func SubM(p, q, r *[3]uint) { } func Sub64(x, y, ci uint64) (r, co uint64) { - // amd64:"NEGL","SBBQ","NEGQ" - // arm64:"NEGS","SBCS","NGC","NEG",-"ADD",-"SUB",-"CMP" - // loong64:"SUBV","SGTU" + // amd64:"NEGL" "SBBQ" "NEGQ" + // arm64:"NEGS" "SBCS" "NGC" "NEG" -"ADD" -"SUB" -"CMP" + // loong64:"SUBV" "SGTU" // ppc64x:"SUBC", "SUBE", "SUBZE", "NEG" // s390x:"SUBE" - // mips64:"SUBV","SGTU" - // riscv64: "SUB","SLTU" + // mips64:"SUBV" "SGTU" + // riscv64: "SUB" "SLTU" return bits.Sub64(x, y, ci) } func Sub64C(x, ci uint64) (r, co uint64) { - // amd64:"NEGL","SBBQ","NEGQ" - // arm64:"NEGS","SBCS","NGC","NEG",-"ADD",-"SUB",-"CMP" - // loong64:"SUBV","SGTU" + // amd64:"NEGL" "SBBQ" "NEGQ" + // arm64:"NEGS" "SBCS" "NGC" "NEG" -"ADD" -"SUB" -"CMP" + // loong64:"SUBV" "SGTU" // ppc64x:"SUBC", "SUBE", "SUBZE", "NEG" // s390x:"SUBE" - // mips64:"SUBV","SGTU" - // riscv64: "SUB","SLTU" + // mips64:"SUBV" "SGTU" + // riscv64: "SUB" "SLTU" return bits.Sub64(x, 7, ci) } func Sub64Z(x, y uint64) (r, co uint64) { - // amd64:"SUBQ","SBBQ","NEGQ",-"NEGL" - // arm64:"SUBS","NGC","NEG",-"SBCS",-"ADD",-"SUB\t",-"CMP" - // loong64:"SUBV","SGTU" + // amd64:"SUBQ" "SBBQ" "NEGQ" -"NEGL" + // arm64:"SUBS" "NGC" "NEG" -"SBCS" -"ADD" -"SUB " -"CMP" + // loong64:"SUBV" "SGTU" // ppc64x:"SUBC", -"SUBE", "SUBZE", "NEG" // s390x:"SUBC" - // mips64:"SUBV","SGTU" - // riscv64: "SUB","SLTU" + // mips64:"SUBV" "SGTU" + // riscv64: "SUB" "SLTU" return bits.Sub64(x, y, 0) } func Sub64R(x, y, ci uint64) uint64 { - // amd64:"NEGL","SBBQ",-"NEGQ" - // arm64:"NEGS","SBCS",-"NGC",-"NEG\t",-"ADD",-"SUB",-"CMP" - // loong64:"SUBV",-"SGTU" + // amd64:"NEGL" "SBBQ" -"NEGQ" + // arm64:"NEGS" "SBCS" -"NGC" -"NEG " -"ADD" -"SUB" -"CMP" + // loong64:"SUBV" -"SGTU" // ppc64x:"SUBC", "SUBE", -"SUBZE", -"NEG" // s390x:"SUBE" - // riscv64: "SUB",-"SLTU" + // riscv64: "SUB" -"SLTU" r, _ := bits.Sub64(x, y, ci) return r } func Sub64M(p, q, r *[3]uint64) { var c uint64 r[0], c = bits.Sub64(p[0], q[0], c) - // amd64:"SBBQ",-"NEGL",-"NEGQ" - // arm64:"SBCS",-"NEGS",-"NGC",-"NEG",-"ADD",-"SUB",-"CMP" + // amd64:"SBBQ" -"NEGL" -"NEGQ" + // arm64:"SBCS" -"NEGS" -"NGC" -"NEG" -"ADD" -"SUB" -"CMP" // s390x:"SUBE" r[1], c = bits.Sub64(p[1], q[1], c) r[2], c = bits.Sub64(p[2], q[2], c) } func Sub64MSaveC(p, q, r, c *[2]uint64) { - // ppc64x:"SUBC\tR\\d+, R\\d+,", "SUBZE", "NEG" + // ppc64x:"SUBC R\\d+, R\\d+,", "SUBZE", "NEG" r[0], c[0] = bits.Sub64(p[0], q[0], 0) - // ppc64x:"SUBC\tR\\d+, [$]0,", "SUBE", "SUBZE", "NEG" + // ppc64x:"SUBC R\\d+, [$]0,", "SUBE", "SUBZE", "NEG" r[1], c[1] = bits.Sub64(p[1], q[1], c[0]) } func Sub64PanicOnOverflowEQ(a, b uint64) uint64 { r, b := bits.Sub64(a, b, 0) - // s390x:"BRC\t[$]12,",-"ADDE",-"SUBE" + // s390x:"BRC [$]12," -"ADDE" -"SUBE" if b == 1 { panic("overflow") } @@ -879,7 +880,7 @@ func Sub64PanicOnOverflowEQ(a, b uint64) uint64 { func Sub64PanicOnOverflowNE(a, b uint64) uint64 { r, b := bits.Sub64(a, b, 0) - // s390x:"BRC\t[$]12,",-"ADDE",-"SUBE" + // s390x:"BRC [$]12," -"ADDE" -"SUBE" if b != 0 { panic("overflow") } @@ -888,7 +889,7 @@ func Sub64PanicOnOverflowNE(a, b uint64) uint64 { func Sub64PanicOnOverflowGT(a, b uint64) uint64 { r, b := bits.Sub64(a, b, 0) - // s390x:"BRC\t[$]12,",-"ADDE",-"SUBE" + // 
s390x:"BRC [$]12," -"ADDE" -"SUBE" if b > 0 { panic("overflow") } @@ -900,7 +901,7 @@ func Sub64MPanicOnOverflowEQ(a, b [2]uint64) [2]uint64 { var c uint64 r[0], c = bits.Sub64(a[0], b[0], c) r[1], c = bits.Sub64(a[1], b[1], c) - // s390x:"BRC\t[$]12," + // s390x:"BRC [$]12," if c == 1 { panic("overflow") } @@ -912,7 +913,7 @@ func Sub64MPanicOnOverflowNE(a, b [2]uint64) [2]uint64 { var c uint64 r[0], c = bits.Sub64(a[0], b[0], c) r[1], c = bits.Sub64(a[1], b[1], c) - // s390x:"BRC\t[$]12," + // s390x:"BRC [$]12," if c != 0 { panic("overflow") } @@ -924,7 +925,7 @@ func Sub64MPanicOnOverflowGT(a, b [2]uint64) [2]uint64 { var c uint64 r[0], c = bits.Sub64(a[0], b[0], c) r[1], c = bits.Sub64(a[1], b[1], c) - // s390x:"BRC\t[$]12," + // s390x:"BRC [$]12," if c > 0 { panic("overflow") } @@ -937,51 +938,51 @@ func Sub64MPanicOnOverflowGT(a, b [2]uint64) [2]uint64 { func Mul(x, y uint) (hi, lo uint) { // amd64:"MULQ" - // arm64:"UMULH","MUL" - // loong64:"MULV","MULHVU" - // ppc64x:"MULHDU","MULLD" + // arm64:"UMULH" "MUL" + // loong64:"MULV" "MULHVU" + // ppc64x:"MULHDU" "MULLD" // s390x:"MLGR" // mips64: "MULVU" - // riscv64:"MULHU","MUL" + // riscv64:"MULHU" "MUL" return bits.Mul(x, y) } func Mul64(x, y uint64) (hi, lo uint64) { // amd64:"MULQ" - // arm64:"UMULH","MUL" - // loong64:"MULV","MULHVU" - // ppc64x:"MULHDU","MULLD" + // arm64:"UMULH" "MUL" + // loong64:"MULV" "MULHVU" + // ppc64x:"MULHDU" "MULLD" // s390x:"MLGR" // mips64: "MULVU" - // riscv64:"MULHU","MUL" + // riscv64:"MULHU" "MUL" return bits.Mul64(x, y) } func Mul64HiOnly(x, y uint64) uint64 { - // arm64:"UMULH",-"MUL" - // loong64:"MULHVU",-"MULV" - // riscv64:"MULHU",-"MUL\t" + // arm64:"UMULH" -"MUL" + // loong64:"MULHVU" -"MULV" + // riscv64:"MULHU" -"MUL " hi, _ := bits.Mul64(x, y) return hi } func Mul64LoOnly(x, y uint64) uint64 { - // arm64:"MUL",-"UMULH" - // loong64:"MULV",-"MULHVU" - // riscv64:"MUL\t",-"MULHU" + // arm64:"MUL" -"UMULH" + // loong64:"MULV" -"MULHVU" + // riscv64:"MUL " -"MULHU" _, lo := bits.Mul64(x, y) return lo } func Mul64Const() (uint64, uint64) { // 7133701809754865664 == 99<<56 - // arm64:"MOVD\t[$]7133701809754865664, R1", "MOVD\t[$]88, R0" - // loong64:"MOVV\t[$]88, R4","MOVV\t[$]7133701809754865664, R5",-"MUL" + // arm64:"MOVD [$]7133701809754865664, R1", "MOVD [$]88, R0" + // loong64:"MOVV [$]88, R4" "MOVV [$]7133701809754865664, R5" -"MUL" return bits.Mul64(99+88<<8, 1<<56) } func MulUintOverflow(p *uint64) []uint64 { - // arm64:"CMP\t[$]72" + // arm64:"CMP [$]72" return unsafe.Slice(p, 9) } @@ -995,7 +996,7 @@ func Div(hi, lo, x uint) (q, r uint) { } func Div32(hi, lo, x uint32) (q, r uint32) { - // arm64:"ORR","UDIV","MSUB",-"UREM" + // arm64:"ORR" "UDIV" "MSUB" -"UREM" return bits.Div32(hi, lo, x) } diff --git a/test/codegen/memcombine.go b/test/codegen/memcombine.go index fa0e902ac2b..deac9e2091e 100644 --- a/test/codegen/memcombine.go +++ b/test/codegen/memcombine.go @@ -16,7 +16,7 @@ import ( // ------------- // func load_le64(b []byte) uint64 { - // amd64:`MOVQ\s\(.*\),`,-`MOV[BWL]\t[^$]`,-`OR` + // amd64:`MOVQ\s\(.*\),`,-`MOV[BWL] [^$]`,-`OR` // s390x:`MOVDBR\s\(.*\),` // arm64:`MOVD\s\(R[0-9]+\),`,-`MOV[BHW]` // loong64:`MOVV\s\(R[0-9]+\),` @@ -26,7 +26,7 @@ func load_le64(b []byte) uint64 { } func load_le64_idx(b []byte, idx int) uint64 { - // amd64:`MOVQ\s\(.*\)\(.*\*1\),`,-`MOV[BWL]\t[^$]`,-`OR` + // amd64:`MOVQ\s\(.*\)\(.*\*1\),`,-`MOV[BWL] [^$]`,-`OR` // s390x:`MOVDBR\s\(.*\)\(.*\*1\),` // arm64:`MOVD\s\(R[0-9]+\)\(R[0-9]+\),`,-`MOV[BHW]` // 
loong64:`MOVV\s\(R[0-9]+\)\(R[0-9]+\),` @@ -78,7 +78,7 @@ func load_le16_idx(b []byte, idx int) uint16 { } func load_be64(b []byte) uint64 { - // amd64/v1,amd64/v2:`BSWAPQ`,-`MOV[BWL]\t[^$]`,-`OR` + // amd64/v1,amd64/v2:`BSWAPQ`,-`MOV[BWL] [^$]`,-`OR` // amd64/v3:`MOVBEQ` // s390x:`MOVD\s\(.*\),` // arm64:`REV`,`MOVD\s\(R[0-9]+\),`,-`MOV[BHW]`,-`REVW`,-`REV16W` @@ -88,8 +88,8 @@ func load_be64(b []byte) uint64 { } func load_be64_idx(b []byte, idx int) uint64 { - // amd64/v1,amd64/v2:`BSWAPQ`,-`MOV[BWL]\t[^$]`,-`OR` - // amd64/v3: `MOVBEQ\t\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\), [A-Z]+[0-9]*` + // amd64/v1,amd64/v2:`BSWAPQ`,-`MOV[BWL] [^$]`,-`OR` + // amd64/v3: `MOVBEQ \([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\), [A-Z]+[0-9]*` // s390x:`MOVD\s\(.*\)\(.*\*1\),` // arm64:`REV`,`MOVD\s\(R[0-9]+\)\(R[0-9]+\),`,-`MOV[WHB]`,-`REVW`,-`REV16W` // ppc64le:`MOVDBR`,-`MOV[BHW]Z` @@ -109,7 +109,7 @@ func load_be32(b []byte) uint32 { func load_be32_idx(b []byte, idx int) uint32 { // amd64/v1,amd64/v2:`BSWAPL`,-`MOV[BW]`,-`OR` - // amd64/v3: `MOVBEL\t\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\), [A-Z]+[0-9]*` + // amd64/v3: `MOVBEL \([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\), [A-Z]+[0-9]*` // s390x:`MOVWZ\s\(.*\)\(.*\*1\),` // arm64:`REVW`,`MOVWU\s\(R[0-9]+\)\(R[0-9]+\),`,-`MOV[HB]`,-`REV16W` // ppc64le:`MOVWBR`,-`MOV[BH]Z` @@ -136,79 +136,79 @@ func load_be16_idx(b []byte, idx int) uint16 { } func load_le_byte2_uint16(s []byte) uint16 { - // arm64:`MOVHU\t\(R[0-9]+\)`,-`ORR`,-`MOVB` + // arm64:`MOVHU \(R[0-9]+\)`,-`ORR`,-`MOVB` // 386:`MOVWLZX\s\([A-Z]+\)`,-`MOVB`,-`OR` // amd64:`MOVWLZX\s\([A-Z]+\)`,-`MOVB`,-`OR` - // ppc64le:`MOVHZ\t\(R[0-9]+\)`,-`MOVBZ` + // ppc64le:`MOVHZ \(R[0-9]+\)`,-`MOVBZ` // ppc64:`MOVHBR`,-`MOVBZ` return uint16(s[0]) | uint16(s[1])<<8 } func load_le_byte2_uint16_inv(s []byte) uint16 { - // arm64:`MOVHU\t\(R[0-9]+\)`,-`ORR`,-`MOVB` + // arm64:`MOVHU \(R[0-9]+\)`,-`ORR`,-`MOVB` // 386:`MOVWLZX\s\([A-Z]+\)`,-`MOVB`,-`OR` // amd64:`MOVWLZX\s\([A-Z]+\)`,-`MOVB`,-`OR` - // ppc64le:`MOVHZ\t\(R[0-9]+\)`,-`MOVBZ` + // ppc64le:`MOVHZ \(R[0-9]+\)`,-`MOVBZ` // ppc64:`MOVHBR`,-`MOVBZ` return uint16(s[1])<<8 | uint16(s[0]) } func load_le_byte4_uint32(s []byte) uint32 { - // arm64:`MOVWU\t\(R[0-9]+\)`,-`ORR`,-`MOV[BH]` + // arm64:`MOVWU \(R[0-9]+\)`,-`ORR`,-`MOV[BH]` // 386:`MOVL\s\([A-Z]+\)`,-`MOV[BW]`,-`OR` // amd64:`MOVL\s\([A-Z]+\)`,-`MOV[BW]`,-`OR` - // ppc64le:`MOVWZ\t\(R[0-9]+\)`,-`MOV[BH]Z` + // ppc64le:`MOVWZ \(R[0-9]+\)`,-`MOV[BH]Z` // ppc64:`MOVWBR`,-MOV[BH]Z` return uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 | uint32(s[3])<<24 } func load_le_byte4_uint32_inv(s []byte) uint32 { - // arm64:`MOVWU\t\(R[0-9]+\)`,-`ORR`,-`MOV[BH]` + // arm64:`MOVWU \(R[0-9]+\)`,-`ORR`,-`MOV[BH]` // ppc64le:`MOVWZ`,-`MOV[BH]Z` // ppc64:`MOVWBR`,-`MOV[BH]Z` return uint32(s[3])<<24 | uint32(s[2])<<16 | uint32(s[1])<<8 | uint32(s[0]) } func load_le_byte8_uint64(s []byte) uint64 { - // arm64:`MOVD\t\(R[0-9]+\)`,-`ORR`,-`MOV[BHW]` - // amd64:`MOVQ\s\([A-Z]+\),\s[A-Z]+`,-`MOV[BWL]\t[^$]`,-`OR` - // ppc64le:`MOVD\t\(R[0-9]+\)`,-`MOV[BHW]Z` + // arm64:`MOVD \(R[0-9]+\)`,-`ORR`,-`MOV[BHW]` + // amd64:`MOVQ\s\([A-Z]+\),\s[A-Z]+`,-`MOV[BWL] [^$]`,-`OR` + // ppc64le:`MOVD \(R[0-9]+\)`,-`MOV[BHW]Z` // ppc64:`MOVDBR`,-`MOVW[WHB]Z` return uint64(s[0]) | uint64(s[1])<<8 | uint64(s[2])<<16 | uint64(s[3])<<24 | uint64(s[4])<<32 | uint64(s[5])<<40 | uint64(s[6])<<48 | uint64(s[7])<<56 } func load_le_byte8_uint64_inv(s []byte) uint64 { - // arm64:`MOVD\t\(R[0-9]+\)`,-`ORR`,-`MOV[BHW]` + // arm64:`MOVD \(R[0-9]+\)`,-`ORR`,-`MOV[BHW]` // 
ppc64le:`MOVD`,-`MOV[WHB]Z` // ppc64:`MOVDBR`,-`MOV[WHB]Z` return uint64(s[7])<<56 | uint64(s[6])<<48 | uint64(s[5])<<40 | uint64(s[4])<<32 | uint64(s[3])<<24 | uint64(s[2])<<16 | uint64(s[1])<<8 | uint64(s[0]) } func load_be_byte2_uint16(s []byte) uint16 { - // arm64:`MOVHU\t\(R[0-9]+\)`,`REV16W`,-`ORR`,-`MOVB` + // arm64:`MOVHU \(R[0-9]+\)`,`REV16W`,-`ORR`,-`MOVB` // amd64:`MOVWLZX\s\([A-Z]+\)`,`ROLW`,-`MOVB`,-`OR` - // ppc64le:`MOVHBR\t\(R[0-9]+\)`,-`MOVBZ` + // ppc64le:`MOVHBR \(R[0-9]+\)`,-`MOVBZ` // ppc64:`MOVHZ`,-`MOVBZ` return uint16(s[0])<<8 | uint16(s[1]) } func load_be_byte2_uint16_inv(s []byte) uint16 { - // arm64:`MOVHU\t\(R[0-9]+\)`,`REV16W`,-`ORR`,-`MOVB` + // arm64:`MOVHU \(R[0-9]+\)`,`REV16W`,-`ORR`,-`MOVB` // amd64:`MOVWLZX\s\([A-Z]+\)`,`ROLW`,-`MOVB`,-`OR` - // ppc64le:`MOVHBR\t\(R[0-9]+\)`,-`MOVBZ` + // ppc64le:`MOVHBR \(R[0-9]+\)`,-`MOVBZ` // ppc64:`MOVHZ`,-`MOVBZ` return uint16(s[1]) | uint16(s[0])<<8 } func load_be_byte4_uint32(s []byte) uint32 { - // arm64:`MOVWU\t\(R[0-9]+\)`,`REVW`,-`ORR`,-`REV16W`,-`MOV[BH]` + // arm64:`MOVWU \(R[0-9]+\)`,`REVW`,-`ORR`,-`REV16W`,-`MOV[BH]` // ppc64le:`MOVWBR`,-`MOV[HB]Z` // ppc64:`MOVWZ`,-`MOV[HB]Z` return uint32(s[0])<<24 | uint32(s[1])<<16 | uint32(s[2])<<8 | uint32(s[3]) } func load_be_byte4_uint32_inv(s []byte) uint32 { - // arm64:`MOVWU\t\(R[0-9]+\)`,`REVW`,-`ORR`,-`REV16W`,-`MOV[BH]` + // arm64:`MOVWU \(R[0-9]+\)`,`REVW`,-`ORR`,-`REV16W`,-`MOV[BH]` // amd64/v1,amd64/v2:`MOVL\s\([A-Z]+\)`,`BSWAPL`,-`MOV[BW]`,-`OR` // amd64/v3: `MOVBEL` // ppc64le:`MOVWBR`,-`MOV[HB]Z` @@ -217,17 +217,17 @@ func load_be_byte4_uint32_inv(s []byte) uint32 { } func load_be_byte8_uint64(s []byte) uint64 { - // arm64:`MOVD\t\(R[0-9]+\)`,`REV`,-`ORR`,-`REVW`,-`REV16W`,-`MOV[BHW]` - // ppc64le:`MOVDBR\t\(R[0-9]+\)`,-`MOV[BHW]Z` + // arm64:`MOVD \(R[0-9]+\)`,`REV`,-`ORR`,-`REVW`,-`REV16W`,-`MOV[BHW]` + // ppc64le:`MOVDBR \(R[0-9]+\)`,-`MOV[BHW]Z` // ppc64:`MOVD`,-`MOV[WHB]Z` return uint64(s[0])<<56 | uint64(s[1])<<48 | uint64(s[2])<<40 | uint64(s[3])<<32 | uint64(s[4])<<24 | uint64(s[5])<<16 | uint64(s[6])<<8 | uint64(s[7]) } func load_be_byte8_uint64_inv(s []byte) uint64 { - // arm64:`MOVD\t\(R[0-9]+\)`,`REV`,-`ORR`,-`REVW`,-`REV16W`,-`MOV[BHW]` - // amd64/v1,amd64/v2:`MOVQ\s\([A-Z]+\),\s[A-Z]+`,`BSWAPQ`,-`MOV[BWL]\t[^$]`,-`OR` + // arm64:`MOVD \(R[0-9]+\)`,`REV`,-`ORR`,-`REVW`,-`REV16W`,-`MOV[BHW]` + // amd64/v1,amd64/v2:`MOVQ\s\([A-Z]+\),\s[A-Z]+`,`BSWAPQ`,-`MOV[BWL] [^$]`,-`OR` // amd64/v3: `MOVBEQ` - // ppc64le:`MOVDBR\t\(R[0-9]+\)`,-`MOV[BHW]Z` + // ppc64le:`MOVDBR \(R[0-9]+\)`,-`MOV[BHW]Z` // ppc64:`MOVD`,-`MOV[BHW]Z` return uint64(s[7]) | uint64(s[6])<<8 | uint64(s[5])<<16 | uint64(s[4])<<24 | uint64(s[3])<<32 | uint64(s[2])<<40 | uint64(s[1])<<48 | uint64(s[0])<<56 } @@ -386,20 +386,20 @@ func fcall_uint32(a [2]uint32) [2]uint32 { // We want to merge load+op in the first function, but not in the // second. See Issue 19595. func load_op_merge(p, q *int) { - x := *p // amd64:`ADDQ\t\(` + x := *p // amd64:`ADDQ \(` *q += x // The combined nilcheck and load would normally have this line number, but we want that combined operation to have the line number of the nil check instead (see #33724). 
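// The load of *p is expected to fold into the ADDQ that updates *q,
// giving one memory-operand add rather than a separate MOVQ plus ADDQ.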
} func load_op_no_merge(p, q *int) { x := *p for i := 0; i < 10; i++ { - *q += x // amd64:`ADDQ\t[A-Z]` + *q += x // amd64:`ADDQ [A-Z]` } } func load_op_in_loop(a []int) int { r := 0 for _, x := range a { - // amd64:`ADDQ\t\([A-Z]+\)\([A-Z]+\*8\), [A-Z]+` + // amd64:`ADDQ \([A-Z]+\)\([A-Z]+\*8\), [A-Z]+` r += x } return r @@ -407,7 +407,7 @@ func load_op_in_loop(a []int) int { // Make sure offsets are folded into loads and stores. func offsets_fold(_, a [20]byte) (b [20]byte) { - // arm64:`MOVD\tcommand-line-arguments\.a\+[0-9]+\(FP\), R[0-9]+`,`MOVD\tR[0-9]+, command-line-arguments\.b\+[0-9]+\(FP\)` + // arm64:`MOVD command-line-arguments\.a\+[0-9]+\(FP\), R[0-9]+`,`MOVD R[0-9]+, command-line-arguments\.b\+[0-9]+\(FP\)` b = a return } @@ -526,7 +526,7 @@ func store_be64(b []byte, x uint64) { func store_be64_idx(b []byte, x uint64, idx int) { // amd64/v1,amd64/v2:`BSWAPQ`,-`SHR.` - // amd64/v3:`MOVBEQ\t[A-Z]+[0-9]*, \([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\)` + // amd64/v3:`MOVBEQ [A-Z]+[0-9]*, \([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\)` // arm64:`REV`,`MOVD\sR[0-9]+,\s\(R[0-9]+\)\(R[0-9]+\)`,-`MOV[BHW]`,-`REV16W`,-`REVW` // ppc64le:`MOVDBR` // ppc64:`MOVD\s` @@ -558,7 +558,7 @@ func store_be32_load(b, x *[8]byte) { func store_be32_idx(b []byte, x uint32, idx int) { // amd64/v1,amd64/v2:`BSWAPL`,-`SHR.` - // amd64/v3:`MOVBEL\t[A-Z]+[0-9]*, \([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\)` + // amd64/v3:`MOVBEL [A-Z]+[0-9]*, \([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\)` // arm64:`REVW`,`MOVW\sR[0-9]+,\s\(R[0-9]+\)\(R[0-9]+\)`,-`MOV[BH]`,-`REV16W` // ppc64le:`MOVWBR` // ppc64:`MOVW\s` @@ -578,7 +578,7 @@ func store_be16(b []byte, x uint16) { func store_be16_idx(b []byte, x uint16, idx int) { // amd64/v1,amd64/v2:`ROLW\s\$8`,-`SHR.` - // amd64/v3:`MOVBEW\t[A-Z]+[0-9]*, \([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\)` + // amd64/v3:`MOVBEW [A-Z]+[0-9]*, \([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\)` // arm64:`MOVH\sR[0-9]+,\s\(R[0-9]+\)\(R[0-9]+\)`,`REV16W`,-`MOVB` // ppc64le:`MOVHBR` // ppc64:`MOVH\s` @@ -736,12 +736,12 @@ func store_le_byte_4_idx4_inv(b []byte, idx int, val uint32) { func zero_byte_2(b1, b2 []byte) { // bounds checks to guarantee safety of writes below _, _ = b1[1], b2[1] - // arm64:"MOVH\tZR",-"MOVB" + // arm64:"MOVH ZR" -"MOVB" // amd64:`MOVW\s[$]0,\s\([A-Z]+\)` // 386:`MOVW\s[$]0,\s\([A-Z]+\)` // ppc64x:`MOVH\s` b1[0], b1[1] = 0, 0 - // arm64:"MOVH\tZR",-"MOVB" + // arm64:"MOVH ZR" -"MOVB" // 386:`MOVW\s[$]0,\s\([A-Z]+\)` // amd64:`MOVW\s[$]0,\s\([A-Z]+\)` // ppc64x:`MOVH` @@ -750,36 +750,36 @@ func zero_byte_2(b1, b2 []byte) { func zero_byte_4(b1, b2 []byte) { _, _ = b1[3], b2[3] - // arm64:"MOVW\tZR",-"MOVB",-"MOVH" + // arm64:"MOVW ZR" -"MOVB" -"MOVH" // amd64:`MOVL\s[$]0,\s\([A-Z]+\)` // 386:`MOVL\s[$]0,\s\([A-Z]+\)` // ppc64x:`MOVW\s` b1[0], b1[1], b1[2], b1[3] = 0, 0, 0, 0 - // arm64:"MOVW\tZR",-"MOVB",-"MOVH" + // arm64:"MOVW ZR" -"MOVB" -"MOVH" // ppc64x:`MOVW\s` b2[2], b2[3], b2[1], b2[0] = 0, 0, 0, 0 } func zero_byte_8(b []byte) { _ = b[7] - b[0], b[1], b[2], b[3] = 0, 0, 0, 0 // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW" + b[0], b[1], b[2], b[3] = 0, 0, 0, 0 // arm64:"MOVD ZR" -"MOVB" -"MOVH" -"MOVW" b[4], b[5], b[6], b[7] = 0, 0, 0, 0 } func zero_byte_16(b []byte) { _ = b[15] - b[0], b[1], b[2], b[3] = 0, 0, 0, 0 // arm64:"STP",-"MOVB",-"MOVH",-"MOVW" + b[0], b[1], b[2], b[3] = 0, 0, 0, 0 // arm64:"STP" -"MOVB" -"MOVH" -"MOVW" b[4], b[5], b[6], b[7] = 0, 0, 0, 0 b[8], b[9], b[10], b[11] = 0, 0, 0, 0 b[12], b[13], b[14], b[15] = 0, 0, 0, 0 } func zero_byte_30(a *[30]byte) { - *a = [30]byte{} // 
arm64:"STP",-"MOVB",-"MOVH",-"MOVW" + *a = [30]byte{} // arm64:"STP" -"MOVB" -"MOVH" -"MOVW" } func zero_byte_39(a *[39]byte) { - *a = [39]byte{} // arm64:"MOVD",-"MOVB",-"MOVH",-"MOVW" + *a = [39]byte{} // arm64:"MOVD" -"MOVB" -"MOVH" -"MOVW" } func zero_byte_2_idx(b []byte, idx int) { @@ -798,12 +798,12 @@ func zero_byte_2_idx2(b []byte, idx int) { func zero_uint16_2(h1, h2 []uint16) { _, _ = h1[1], h2[1] - // arm64:"MOVW\tZR",-"MOVB",-"MOVH" + // arm64:"MOVW ZR" -"MOVB" -"MOVH" // amd64:`MOVL\s[$]0,\s\([A-Z]+\)` // 386:`MOVL\s[$]0,\s\([A-Z]+\)` // ppc64x:`MOVW\s` h1[0], h1[1] = 0, 0 - // arm64:"MOVW\tZR",-"MOVB",-"MOVH" + // arm64:"MOVW ZR" -"MOVB" -"MOVH" // amd64:`MOVL\s[$]0,\s\([A-Z]+\)` // 386:`MOVL\s[$]0,\s\([A-Z]+\)` // ppc64x:`MOVW` @@ -812,28 +812,28 @@ func zero_uint16_2(h1, h2 []uint16) { func zero_uint16_4(h1, h2 []uint16) { _, _ = h1[3], h2[3] - // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW" + // arm64:"MOVD ZR" -"MOVB" -"MOVH" -"MOVW" // amd64:`MOVQ\s[$]0,\s\([A-Z]+\)` // ppc64x:`MOVD\s` h1[0], h1[1], h1[2], h1[3] = 0, 0, 0, 0 - // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW" + // arm64:"MOVD ZR" -"MOVB" -"MOVH" -"MOVW" // ppc64x:`MOVD\s` h2[2], h2[3], h2[1], h2[0] = 0, 0, 0, 0 } func zero_uint16_8(h []uint16) { _ = h[7] - h[0], h[1], h[2], h[3] = 0, 0, 0, 0 // arm64:"STP",-"MOVB",-"MOVH" + h[0], h[1], h[2], h[3] = 0, 0, 0, 0 // arm64:"STP" -"MOVB" -"MOVH" h[4], h[5], h[6], h[7] = 0, 0, 0, 0 } func zero_uint32_2(w1, w2 []uint32) { _, _ = w1[1], w2[1] - // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW" + // arm64:"MOVD ZR" -"MOVB" -"MOVH" -"MOVW" // amd64:`MOVQ\s[$]0,\s\([A-Z]+\)` // ppc64x:`MOVD\s` w1[0], w1[1] = 0, 0 - // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW" + // arm64:"MOVD ZR" -"MOVB" -"MOVH" -"MOVW" // amd64:`MOVQ\s[$]0,\s\([A-Z]+\)` // ppc64x:`MOVD\s` w2[1], w2[0] = 0, 0 @@ -841,22 +841,22 @@ func zero_uint32_2(w1, w2 []uint32) { func zero_uint32_4(w1, w2 []uint32) { _, _ = w1[3], w2[3] - w1[0], w1[1], w1[2], w1[3] = 0, 0, 0, 0 // arm64:"STP",-"MOVB",-"MOVH" - w2[2], w2[3], w2[1], w2[0] = 0, 0, 0, 0 // arm64:"STP",-"MOVB",-"MOVH" + w1[0], w1[1], w1[2], w1[3] = 0, 0, 0, 0 // arm64:"STP" -"MOVB" -"MOVH" + w2[2], w2[3], w2[1], w2[0] = 0, 0, 0, 0 // arm64:"STP" -"MOVB" -"MOVH" } func zero_uint64_2(d1, d2 []uint64) { _, _ = d1[1], d2[1] - d1[0], d1[1] = 0, 0 // arm64:"STP",-"MOVB",-"MOVH" - d2[1], d2[0] = 0, 0 // arm64:"STP",-"MOVB",-"MOVH" + d1[0], d1[1] = 0, 0 // arm64:"STP" -"MOVB" -"MOVH" + d2[1], d2[0] = 0, 0 // arm64:"STP" -"MOVB" -"MOVH" } func loadstore(p, q *[4]uint8) { - // amd64:"MOVL",-"MOVB" - // arm64:"MOVWU",-"MOVBU" + // amd64:"MOVL" -"MOVB" + // arm64:"MOVWU" -"MOVBU" x0, x1, x2, x3 := q[0], q[1], q[2], q[3] - // amd64:"MOVL",-"MOVB" - // arm64:"MOVW",-"MOVB" + // amd64:"MOVL" -"MOVB" + // arm64:"MOVW" -"MOVB" p[0], p[1], p[2], p[3] = x0, x1, x2, x3 } @@ -865,11 +865,11 @@ type S1 struct { } func loadstore2(p, q *S1) { - // amd64:"MOVL",-"MOVWLZX" - // arm64:"MOVWU",-"MOVH" + // amd64:"MOVL" -"MOVWLZX" + // arm64:"MOVWU" -"MOVH" a, b := p.a, p.b - // amd64:"MOVL",-"MOVW" - // arm64:"MOVW",-"MOVH" + // amd64:"MOVL" -"MOVW" + // arm64:"MOVW" -"MOVH" q.a, q.b = a, b } @@ -878,11 +878,11 @@ func wideStore(p *[8]uint64) { return } - // amd64:"MOVUPS",-"MOVQ" - // arm64:"STP",-"MOVD" + // amd64:"MOVUPS" -"MOVQ" + // arm64:"STP" -"MOVD" p[0] = 0 - // amd64:-"MOVUPS",-"MOVQ" - // arm64:-"STP",-"MOVD" + // amd64:-"MOVUPS" -"MOVQ" + // arm64:-"STP" -"MOVD" p[1] = 0 } @@ -893,52 +893,52 @@ func wideStore2(p *[8]uint64, x, y uint64) { // s390x:"STMG" p[0] = x - // 
s390x:-"STMG",-"MOVD" + // s390x:-"STMG" -"MOVD" p[1] = y } func store32le(p *struct{ a, b uint32 }, x uint64) { - // amd64:"MOVQ",-"MOVL",-"SHRQ" - // arm64:"MOVD",-"MOVW",-"LSR" - // ppc64le:"MOVD",-"MOVW",-"SRD" + // amd64:"MOVQ" -"MOVL" -"SHRQ" + // arm64:"MOVD" -"MOVW" -"LSR" + // ppc64le:"MOVD" -"MOVW" -"SRD" p.a = uint32(x) - // amd64:-"MOVL",-"SHRQ" - // arm64:-"MOVW",-"LSR" - // ppc64le:-"MOVW",-"SRD" + // amd64:-"MOVL" -"SHRQ" + // arm64:-"MOVW" -"LSR" + // ppc64le:-"MOVW" -"SRD" p.b = uint32(x >> 32) } func store32be(p *struct{ a, b uint32 }, x uint64) { // arm64:"STPW" - // ppc64:"MOVD",-"MOVW",-"SRD" - // s390x:"MOVD",-"MOVW",-"SRD" + // ppc64:"MOVD" -"MOVW" -"SRD" + // s390x:"MOVD" -"MOVW" -"SRD" p.a = uint32(x >> 32) // arm64:-"STPW" - // ppc64:-"MOVW",-"SRD" - // s390x:-"MOVW",-"SRD" + // ppc64:-"MOVW" -"SRD" + // s390x:-"MOVW" -"SRD" p.b = uint32(x) } func store16le(p *struct{ a, b uint16 }, x uint32) { - // amd64:"MOVL",-"MOVW",-"SHRL" - // arm64:"MOVW",-"MOVH",-"UBFX" - // ppc64le:"MOVW",-"MOVH",-"SRW" + // amd64:"MOVL" -"MOVW" -"SHRL" + // arm64:"MOVW" -"MOVH" -"UBFX" + // ppc64le:"MOVW" -"MOVH" -"SRW" p.a = uint16(x) - // amd64:-"MOVW",-"SHRL" - // arm64:-"MOVH",-"UBFX" - // ppc64le:-"MOVH",-"SRW" + // amd64:-"MOVW" -"SHRL" + // arm64:-"MOVH" -"UBFX" + // ppc64le:-"MOVH" -"SRW" p.b = uint16(x >> 16) } func store16be(p *struct{ a, b uint16 }, x uint32) { - // ppc64:"MOVW",-"MOVH",-"SRW" - // s390x:"MOVW",-"MOVH",-"SRW" + // ppc64:"MOVW" -"MOVH" -"SRW" + // s390x:"MOVW" -"MOVH" -"SRW" p.a = uint16(x >> 16) - // ppc64:-"MOVH",-"SRW" - // s390x:-"MOVH",-"SRW" + // ppc64:-"MOVH" -"SRW" + // s390x:-"MOVH" -"SRW" p.b = uint16(x) } func storeBoolConst(p *struct{ a, b bool }) { - // amd64:"MOVW",-"MOVB" - // arm64:"MOVH",-"MOVB" + // amd64:"MOVW" -"MOVB" + // arm64:"MOVH" -"MOVB" p.a = true p.b = true } @@ -948,8 +948,8 @@ func issue66413(p *struct { c bool d int8 }) { - // amd64:"MOVL",-"MOVB" - // arm64:"MOVW",-"MOVB" + // amd64:"MOVL" -"MOVB" + // arm64:"MOVW" -"MOVB" p.a = 31 p.b = false p.c = true @@ -957,7 +957,7 @@ func issue66413(p *struct { } func issue70300(v uint64) (b [8]byte) { - // amd64:"MOVQ",-"MOVB" + // amd64:"MOVQ" -"MOVB" b[0] = byte(v) b[1] = byte(v >> 8) b[2] = byte(v >> 16) @@ -970,7 +970,7 @@ func issue70300(v uint64) (b [8]byte) { } func issue70300Reverse(v uint64) (b [8]byte) { - // amd64:"MOVQ",-"MOVB" + // amd64:"MOVQ" -"MOVB" b[7] = byte(v >> 56) b[6] = byte(v >> 48) b[5] = byte(v >> 40) @@ -987,43 +987,43 @@ func issue70300Reverse(v uint64) (b [8]byte) { // --------------------------------- // func dwloadI64(p *struct{ a, b int64 }) int64 { - // arm64:"LDP\t" + // arm64:"LDP " return p.a + p.b } func dwloadI32(p *struct{ a, b int32 }) int32 { - // arm64:"LDPSW\t" + // arm64:"LDPSW " return p.a + p.b } func dwloadU32(p *struct{ a, b uint32 }) uint32 { - // arm64:"LDPW\t" + // arm64:"LDPW " return p.a + p.b } func dwloadF64(p *struct{ a, b float64 }) float64 { - // arm64:"FLDPD\t" + // arm64:"FLDPD " return p.a + p.b } func dwloadF32(p *struct{ a, b float32 }) float32 { - // arm64:"FLDPS\t" + // arm64:"FLDPS " return p.a + p.b } func dwloadBig(p *struct{ a, b, c, d, e, f int64 }) int64 { - // arm64:"LDP\t\\(", "LDP\t16", "LDP\t32" + // arm64:"LDP \\(", "LDP 16", "LDP 32" return p.c + p.f + p.a + p.e + p.d + p.b } func dwloadArg(a [2]int64) int64 { - // arm64:"LDP\t" + // arm64:"LDP " return a[0] + a[1] } func dwloadResult1(p *string) string { - // arm64:"LDP\t\\(R0\\), \\(R0, R1\\)" + // arm64:"LDP \\(R0\\), \\(R0, R1\\)" return *p } func 
dwloadResult2(p *[2]int64) (int64, int64) { - // arm64:"LDP\t\\(R0\\), \\(R1, R0\\)" + // arm64:"LDP \\(R0\\), \\(R1, R0\\)" return p[1], p[0] } @@ -1032,22 +1032,22 @@ func dwloadResult2(p *[2]int64) (int64, int64) { // ---------------------------------- // func dwstoreI64(p *struct{ a, b int64 }, x, y int64) { - // arm64:"STP\t" + // arm64:"STP " p.a = x p.b = y } func dwstoreI32(p *struct{ a, b int32 }, x, y int32) { - // arm64:"STPW\t" + // arm64:"STPW " p.a = x p.b = y } func dwstoreF64(p *struct{ a, b float64 }, x, y float64) { - // arm64:"FSTPD\t" + // arm64:"FSTPD " p.a = x p.b = y } func dwstoreF32(p *struct{ a, b float32 }, x, y float32) { - // arm64:"FSTPS\t" + // arm64:"FSTPS " p.a = x p.b = y } @@ -1065,14 +1065,14 @@ func dwstoreBig(p *struct{ a, b, c, d, e, f int64 }, a, b, c, d, e, f int64) { } func dwstoreRet() [2]int { - // arm64:"STP\t" + // arm64:"STP " return [2]int{5, 6} } func dwstoreLocal(i int) int64 { var a [2]int64 a[0] = 5 - // arm64:"STP\t" + // arm64:"STP " a[1] = 6 return a[i] } @@ -1081,7 +1081,7 @@ func dwstoreOrder(p *struct { a, b int64 c, d, e, f bool }, a, b int64) { - // arm64:"STP\t" + // arm64:"STP " p.a = a p.c = true p.e = true diff --git a/test/codegen/memops.go b/test/codegen/memops.go index e5e89c2acc9..4d7bdfb6850 100644 --- a/test/codegen/memops.go +++ b/test/codegen/memops.go @@ -13,23 +13,23 @@ var x32 [2]uint32 var x64 [2]uint64 func compMem1() int { - // amd64:`CMPB\tcommand-line-arguments.x\+1\(SB\), [$]0` + // amd64:`CMPB command-line-arguments.x\+1\(SB\), [$]0` if x[1] { return 1 } - // amd64:`CMPB\tcommand-line-arguments.x8\+1\(SB\), [$]7` + // amd64:`CMPB command-line-arguments.x8\+1\(SB\), [$]7` if x8[1] == 7 { return 1 } - // amd64:`CMPW\tcommand-line-arguments.x16\+2\(SB\), [$]7` + // amd64:`CMPW command-line-arguments.x16\+2\(SB\), [$]7` if x16[1] == 7 { return 1 } - // amd64:`CMPL\tcommand-line-arguments.x32\+4\(SB\), [$]7` + // amd64:`CMPL command-line-arguments.x32\+4\(SB\), [$]7` if x32[1] == 7 { return 1 } - // amd64:`CMPQ\tcommand-line-arguments.x64\+8\(SB\), [$]7` + // amd64:`CMPQ command-line-arguments.x64\+8\(SB\), [$]7` if x64[1] == 7 { return 1 } @@ -46,23 +46,23 @@ type T struct { } func compMem2(t T) int { - // amd64:`CMPB\t.*\(SP\), [$]0` + // amd64:`CMPB .*\(SP\), [$]0` if t.x { return 1 } - // amd64:`CMPB\t.*\(SP\), [$]7` + // amd64:`CMPB .*\(SP\), [$]7` if t.x8 == 7 { return 1 } - // amd64:`CMPW\t.*\(SP\), [$]7` + // amd64:`CMPW .*\(SP\), [$]7` if t.x16 == 7 { return 1 } - // amd64:`CMPL\t.*\(SP\), [$]7` + // amd64:`CMPL .*\(SP\), [$]7` if t.x32 == 7 { return 1 } - // amd64:`CMPQ\t.*\(SP\), [$]7` + // amd64:`CMPQ .*\(SP\), [$]7` if t.x64 == 7 { return 1 } @@ -73,8 +73,8 @@ func compMem3(x, y *int) (int, bool) { // We can do comparisons of a register with memory even if // the register is used subsequently. 
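// E.g. on amd64 the r < *y below can become a single CMPQ (reg), reg,
// with the register holding r still live for the first return value.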
r := *x - // amd64:`CMPQ\t\(` - // 386:`CMPL\t\(` + // amd64:`CMPQ \(` + // 386:`CMPL \(` return r, r < *y } @@ -82,261 +82,261 @@ func compMem3(x, y *int) (int, bool) { func idxInt8(x, y []int8, i int) { var t int8 - // amd64: `MOVBL[SZ]X\t1\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\), [A-Z]+[0-9]*` - // 386: `MOVBL[SZ]X\t1\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\), [A-Z]+[0-9]*` + // amd64: `MOVBL[SZ]X 1\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\), [A-Z]+[0-9]*` + // 386: `MOVBL[SZ]X 1\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\), [A-Z]+[0-9]*` t = x[i+1] - // amd64: `MOVB\t[A-Z]+[0-9]*, 1\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\)` - // 386: `MOVB\t[A-Z]+[0-9]*, 1\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\)` + // amd64: `MOVB [A-Z]+[0-9]*, 1\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\)` + // 386: `MOVB [A-Z]+[0-9]*, 1\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\)` y[i+1] = t - // amd64: `MOVB\t[$]77, 1\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\)` - // 386: `MOVB\t[$]77, 1\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\)` + // amd64: `MOVB [$]77, 1\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\)` + // 386: `MOVB [$]77, 1\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\)` x[i+1] = 77 } func idxInt16(x, y []int16, i int) { var t int16 - // amd64: `MOVWL[SZ]X\t2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*2\), [A-Z]+[0-9]*` - // 386: `MOVWL[SZ]X\t2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*2\), [A-Z]+[0-9]*` + // amd64: `MOVWL[SZ]X 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*2\), [A-Z]+[0-9]*` + // 386: `MOVWL[SZ]X 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*2\), [A-Z]+[0-9]*` t = x[i+1] - // amd64: `MOVW\t[A-Z]+[0-9]*, 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*2\)` - // 386: `MOVW\t[A-Z]+[0-9]*, 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*2\)` + // amd64: `MOVW [A-Z]+[0-9]*, 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*2\)` + // 386: `MOVW [A-Z]+[0-9]*, 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*2\)` y[i+1] = t - // amd64: `MOVWL[SZ]X\t2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[12]\), [A-Z]+[0-9]*` - // 386: `MOVWL[SZ]X\t2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[12]\), [A-Z]+[0-9]*` + // amd64: `MOVWL[SZ]X 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[12]\), [A-Z]+[0-9]*` + // 386: `MOVWL[SZ]X 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[12]\), [A-Z]+[0-9]*` t = x[16*i+1] - // amd64: `MOVW\t[A-Z]+[0-9]*, 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[12]\)` - // 386: `MOVW\t[A-Z]+[0-9]*, 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[12]\)` + // amd64: `MOVW [A-Z]+[0-9]*, 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[12]\)` + // 386: `MOVW [A-Z]+[0-9]*, 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[12]\)` y[16*i+1] = t - // amd64: `MOVW\t[$]77, 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*2\)` - // 386: `MOVW\t[$]77, 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*2\)` + // amd64: `MOVW [$]77, 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*2\)` + // 386: `MOVW [$]77, 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*2\)` x[i+1] = 77 - // amd64: `MOVW\t[$]77, 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[12]\)` - // 386: `MOVW\t[$]77, 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[12]\)` + // amd64: `MOVW [$]77, 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[12]\)` + // 386: `MOVW [$]77, 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[12]\)` x[16*i+1] = 77 } func idxInt32(x, y []int32, i int) { var t int32 - // amd64: `MOVL\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` - // 386: `MOVL\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` + // amd64: `MOVL 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` + // 386: `MOVL 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` t = x[i+1] - // amd64: `MOVL\t[A-Z]+[0-9]*, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` - // 386: `MOVL\t[A-Z]+[0-9]*, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` + // amd64: `MOVL [A-Z]+[0-9]*, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` + // 386: `MOVL [A-Z]+[0-9]*, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` y[i+1] = t - // amd64: 
`MOVL\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` + // amd64: `MOVL 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` t = x[2*i+1] - // amd64: `MOVL\t[A-Z]+[0-9]*, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` + // amd64: `MOVL [A-Z]+[0-9]*, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` y[2*i+1] = t - // amd64: `MOVL\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), [A-Z]+[0-9]*` - // 386: `MOVL\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), [A-Z]+[0-9]*` + // amd64: `MOVL 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), [A-Z]+[0-9]*` + // 386: `MOVL 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), [A-Z]+[0-9]*` t = x[16*i+1] - // amd64: `MOVL\t[A-Z]+[0-9]*, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)` - // 386: `MOVL\t[A-Z]+[0-9]*, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)` + // amd64: `MOVL [A-Z]+[0-9]*, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)` + // 386: `MOVL [A-Z]+[0-9]*, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)` y[16*i+1] = t - // amd64: `MOVL\t[$]77, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` - // 386: `MOVL\t[$]77, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` + // amd64: `MOVL [$]77, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` + // 386: `MOVL [$]77, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` x[i+1] = 77 - // amd64: `MOVL\t[$]77, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)` - // 386: `MOVL\t[$]77, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)` + // amd64: `MOVL [$]77, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)` + // 386: `MOVL [$]77, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)` x[16*i+1] = 77 } func idxInt64(x, y []int64, i int) { var t int64 - // amd64: `MOVQ\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` + // amd64: `MOVQ 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` t = x[i+1] - // amd64: `MOVQ\t[A-Z]+[0-9]*, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` + // amd64: `MOVQ [A-Z]+[0-9]*, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` y[i+1] = t - // amd64: `MOVQ\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\), [A-Z]+[0-9]*` + // amd64: `MOVQ 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\), [A-Z]+[0-9]*` t = x[16*i+1] - // amd64: `MOVQ\t[A-Z]+[0-9]*, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\)` + // amd64: `MOVQ [A-Z]+[0-9]*, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\)` y[16*i+1] = t - // amd64: `MOVQ\t[$]77, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` + // amd64: `MOVQ [$]77, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` x[i+1] = 77 - // amd64: `MOVQ\t[$]77, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\)` + // amd64: `MOVQ [$]77, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\)` x[16*i+1] = 77 } func idxFloat32(x, y []float32, i int) { var t float32 - // amd64: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), X[0-9]+` - // 386/sse2: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), X[0-9]+` - // arm64: `FMOVS\t\(R[0-9]*\)\(R[0-9]*<<2\), F[0-9]+` + // amd64: `MOVSS 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), X[0-9]+` + // 386/sse2: `MOVSS 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), X[0-9]+` + // arm64: `FMOVS \(R[0-9]*\)\(R[0-9]*<<2\), F[0-9]+` t = x[i+1] - // amd64: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` - // 386/sse2: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` - // arm64: `FMOVS\tF[0-9]+, \(R[0-9]*\)\(R[0-9]*<<2\)` + // amd64: `MOVSS X[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` + // 386/sse2: `MOVSS X[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` + // arm64: `FMOVS F[0-9]+, \(R[0-9]*\)\(R[0-9]*<<2\)` y[i+1] = t - // amd64: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), X[0-9]+` - // 386/sse2: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), X[0-9]+` + // amd64: `MOVSS 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), X[0-9]+` + // 386/sse2: `MOVSS 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), X[0-9]+` t = x[16*i+1] - // 
amd64: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)` - // 386/sse2: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)` + // amd64: `MOVSS X[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)` + // 386/sse2: `MOVSS X[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)` y[16*i+1] = t } func idxFloat64(x, y []float64, i int) { var t float64 - // amd64: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), X[0-9]+` - // 386/sse2: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), X[0-9]+` - // arm64: `FMOVD\t\(R[0-9]*\)\(R[0-9]*<<3\), F[0-9]+` + // amd64: `MOVSD 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), X[0-9]+` + // 386/sse2: `MOVSD 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), X[0-9]+` + // arm64: `FMOVD \(R[0-9]*\)\(R[0-9]*<<3\), F[0-9]+` t = x[i+1] - // amd64: `MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` - // 386/sse2: `MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` - // arm64: `FMOVD\tF[0-9]+, \(R[0-9]*\)\(R[0-9]*<<3\)` + // amd64: `MOVSD X[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` + // 386/sse2: `MOVSD X[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` + // arm64: `FMOVD F[0-9]+, \(R[0-9]*\)\(R[0-9]*<<3\)` y[i+1] = t - // amd64: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\), X[0-9]+` - // 386/sse2: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\), X[0-9]+` + // amd64: `MOVSD 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\), X[0-9]+` + // 386/sse2: `MOVSD 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\), X[0-9]+` t = x[16*i+1] - // amd64: `MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\)` - // 386/sse2: `MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\)` + // amd64: `MOVSD X[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\)` + // 386/sse2: `MOVSD X[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\)` y[16*i+1] = t } func idxLoadPlusOp32(x []int32, i int) int32 { s := x[0] - // 386: `ADDL\t4\([A-Z]+\)\([A-Z]+\*4\), [A-Z]+` - // amd64: `ADDL\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` + // 386: `ADDL 4\([A-Z]+\)\([A-Z]+\*4\), [A-Z]+` + // amd64: `ADDL 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` s += x[i+1] - // 386: `SUBL\t8\([A-Z]+\)\([A-Z]+\*4\), [A-Z]+` - // amd64: `SUBL\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` + // 386: `SUBL 8\([A-Z]+\)\([A-Z]+\*4\), [A-Z]+` + // amd64: `SUBL 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` s -= x[i+2] - // 386: `IMULL\t12\([A-Z]+\)\([A-Z]+\*4\), [A-Z]+` + // 386: `IMULL 12\([A-Z]+\)\([A-Z]+\*4\), [A-Z]+` s *= x[i+3] - // 386: `ANDL\t16\([A-Z]+\)\([A-Z]+\*4\), [A-Z]+` - // amd64: `ANDL\t16\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` + // 386: `ANDL 16\([A-Z]+\)\([A-Z]+\*4\), [A-Z]+` + // amd64: `ANDL 16\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` s &= x[i+4] - // 386: `ORL\t20\([A-Z]+\)\([A-Z]+\*4\), [A-Z]+` - // amd64: `ORL\t20\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` + // 386: `ORL 20\([A-Z]+\)\([A-Z]+\*4\), [A-Z]+` + // amd64: `ORL 20\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` s |= x[i+5] - // 386: `XORL\t24\([A-Z]+\)\([A-Z]+\*4\), [A-Z]+` - // amd64: `XORL\t24\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` + // 386: `XORL 24\([A-Z]+\)\([A-Z]+\*4\), [A-Z]+` + // amd64: `XORL 24\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` s ^= x[i+6] return s } func idxLoadPlusOp64(x []int64, i int) int64 { s := x[0] - // amd64: `ADDQ\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` + // amd64: `ADDQ 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` s += x[i+1] - // amd64: `SUBQ\t16\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` + // amd64: `SUBQ 16\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` s 
-= x[i+2] - // amd64: `ANDQ\t24\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` + // amd64: `ANDQ 24\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` s &= x[i+3] - // amd64: `ORQ\t32\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` + // amd64: `ORQ 32\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` s |= x[i+4] - // amd64: `XORQ\t40\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` + // amd64: `XORQ 40\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` s ^= x[i+5] return s } func idxStorePlusOp32(x []int32, i int, v int32) { - // 386: `ADDL\t[A-Z]+, 4\([A-Z]+\)\([A-Z]+\*4\)` - // amd64: `ADDL\t[A-Z]+[0-9]*, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` + // 386: `ADDL [A-Z]+, 4\([A-Z]+\)\([A-Z]+\*4\)` + // amd64: `ADDL [A-Z]+[0-9]*, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` x[i+1] += v - // 386: `SUBL\t[A-Z]+, 8\([A-Z]+\)\([A-Z]+\*4\)` - // amd64: `SUBL\t[A-Z]+[0-9]*, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` + // 386: `SUBL [A-Z]+, 8\([A-Z]+\)\([A-Z]+\*4\)` + // amd64: `SUBL [A-Z]+[0-9]*, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` x[i+2] -= v - // 386: `ANDL\t[A-Z]+, 12\([A-Z]+\)\([A-Z]+\*4\)` - // amd64: `ANDL\t[A-Z]+[0-9]*, 12\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` + // 386: `ANDL [A-Z]+, 12\([A-Z]+\)\([A-Z]+\*4\)` + // amd64: `ANDL [A-Z]+[0-9]*, 12\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` x[i+3] &= v - // 386: `ORL\t[A-Z]+, 16\([A-Z]+\)\([A-Z]+\*4\)` - // amd64: `ORL\t[A-Z]+[0-9]*, 16\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` + // 386: `ORL [A-Z]+, 16\([A-Z]+\)\([A-Z]+\*4\)` + // amd64: `ORL [A-Z]+[0-9]*, 16\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` x[i+4] |= v - // 386: `XORL\t[A-Z]+, 20\([A-Z]+\)\([A-Z]+\*4\)` - // amd64: `XORL\t[A-Z]+[0-9]*, 20\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` + // 386: `XORL [A-Z]+, 20\([A-Z]+\)\([A-Z]+\*4\)` + // amd64: `XORL [A-Z]+[0-9]*, 20\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` x[i+5] ^= v - // 386: `ADDL\t[$]77, 24\([A-Z]+\)\([A-Z]+\*4\)` - // amd64: `ADDL\t[$]77, 24\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` + // 386: `ADDL [$]77, 24\([A-Z]+\)\([A-Z]+\*4\)` + // amd64: `ADDL [$]77, 24\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` x[i+6] += 77 - // 386: `ANDL\t[$]77, 28\([A-Z]+\)\([A-Z]+\*4\)` - // amd64: `ANDL\t[$]77, 28\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` + // 386: `ANDL [$]77, 28\([A-Z]+\)\([A-Z]+\*4\)` + // amd64: `ANDL [$]77, 28\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` x[i+7] &= 77 - // 386: `ORL\t[$]77, 32\([A-Z]+\)\([A-Z]+\*4\)` - // amd64: `ORL\t[$]77, 32\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` + // 386: `ORL [$]77, 32\([A-Z]+\)\([A-Z]+\*4\)` + // amd64: `ORL [$]77, 32\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` x[i+8] |= 77 - // 386: `XORL\t[$]77, 36\([A-Z]+\)\([A-Z]+\*4\)` - // amd64: `XORL\t[$]77, 36\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` + // 386: `XORL [$]77, 36\([A-Z]+\)\([A-Z]+\*4\)` + // amd64: `XORL [$]77, 36\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` x[i+9] ^= 77 } func idxStorePlusOp64(x []int64, i int, v int64) { - // amd64: `ADDQ\t[A-Z]+[0-9]*, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` + // amd64: `ADDQ [A-Z]+[0-9]*, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` x[i+1] += v - // amd64: `SUBQ\t[A-Z]+[0-9]*, 16\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` + // amd64: `SUBQ [A-Z]+[0-9]*, 16\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` x[i+2] -= v - // amd64: `ANDQ\t[A-Z]+[0-9]*, 24\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` + // amd64: `ANDQ [A-Z]+[0-9]*, 24\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` x[i+3] &= v - // amd64: `ORQ\t[A-Z]+[0-9]*, 32\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` + // amd64: `ORQ [A-Z]+[0-9]*, 32\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` x[i+4] |= v - // amd64: `XORQ\t[A-Z]+[0-9]*, 40\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` + // amd64: `XORQ [A-Z]+[0-9]*, 
40\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` x[i+5] ^= v - // amd64: `ADDQ\t[$]77, 48\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` + // amd64: `ADDQ [$]77, 48\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` x[i+6] += 77 - // amd64: `ANDQ\t[$]77, 56\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` + // amd64: `ANDQ [$]77, 56\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` x[i+7] &= 77 - // amd64: `ORQ\t[$]77, 64\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` + // amd64: `ORQ [$]77, 64\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` x[i+8] |= 77 - // amd64: `XORQ\t[$]77, 72\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` + // amd64: `XORQ [$]77, 72\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` x[i+9] ^= 77 } func idxCompare(i int) int { - // amd64: `MOVBLZX\t1\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\), [A-Z]+[0-9]*` + // amd64: `MOVBLZX 1\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\), [A-Z]+[0-9]*` if x8[i+1] < x8[0] { return 0 } - // amd64: `MOVWLZX\t2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*2\), [A-Z]+[0-9]*` + // amd64: `MOVWLZX 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*2\), [A-Z]+[0-9]*` if x16[i+1] < x16[0] { return 0 } - // amd64: `MOVWLZX\t2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[12]\), [A-Z]+[0-9]*` + // amd64: `MOVWLZX 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[12]\), [A-Z]+[0-9]*` if x16[16*i+1] < x16[0] { return 0 } - // amd64: `MOVL\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` + // amd64: `MOVL 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` if x32[i+1] < x32[0] { return 0 } - // amd64: `MOVL\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), [A-Z]+[0-9]*` + // amd64: `MOVL 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), [A-Z]+[0-9]*` if x32[16*i+1] < x32[0] { return 0 } - // amd64: `MOVQ\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` + // amd64: `MOVQ 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` if x64[i+1] < x64[0] { return 0 } - // amd64: `MOVQ\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\), [A-Z]+[0-9]*` + // amd64: `MOVQ 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\), [A-Z]+[0-9]*` if x64[16*i+1] < x64[0] { return 0 } - // amd64: `MOVBLZX\t2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\), [A-Z]+[0-9]*` + // amd64: `MOVBLZX 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\), [A-Z]+[0-9]*` if x8[i+2] < 77 { return 0 } - // amd64: `MOVWLZX\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*2\), [A-Z]+[0-9]*` + // amd64: `MOVWLZX 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*2\), [A-Z]+[0-9]*` if x16[i+2] < 77 { return 0 } - // amd64: `MOVWLZX\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[12]\), [A-Z]+[0-9]*` + // amd64: `MOVWLZX 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[12]\), [A-Z]+[0-9]*` if x16[16*i+2] < 77 { return 0 } - // amd64: `MOVL\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` + // amd64: `MOVL 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*` if x32[i+2] < 77 { return 0 } - // amd64: `MOVL\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), [A-Z]+[0-9]*` + // amd64: `MOVL 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), [A-Z]+[0-9]*` if x32[16*i+2] < 77 { return 0 } - // amd64: `MOVQ\t16\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` + // amd64: `MOVQ 16\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*` if x64[i+2] < 77 { return 0 } - // amd64: `MOVQ\t16\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\), [A-Z]+[0-9]*` + // amd64: `MOVQ 16\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\), [A-Z]+[0-9]*` if x64[16*i+2] < 77 { return 0 } @@ -345,59 +345,59 @@ func idxCompare(i int) int { func idxFloatOps(a []float64, b []float32, i int) (float64, float32) { c := float64(7) - // amd64: `ADDSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), X[0-9]+` + // amd64: `ADDSD 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), X[0-9]+` c += a[i+1] - // amd64: `SUBSD\t16\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), X[0-9]+` + // amd64: `SUBSD 
16\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), X[0-9]+` c -= a[i+2] - // amd64: `MULSD\t24\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), X[0-9]+` + // amd64: `MULSD 24\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), X[0-9]+` c *= a[i+3] - // amd64: `DIVSD\t32\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), X[0-9]+` + // amd64: `DIVSD 32\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), X[0-9]+` c /= a[i+4] d := float32(8) - // amd64: `ADDSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), X[0-9]+` + // amd64: `ADDSS 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), X[0-9]+` d += b[i+1] - // amd64: `SUBSS\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), X[0-9]+` + // amd64: `SUBSS 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), X[0-9]+` d -= b[i+2] - // amd64: `MULSS\t12\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), X[0-9]+` + // amd64: `MULSS 12\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), X[0-9]+` d *= b[i+3] - // amd64: `DIVSS\t16\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), X[0-9]+` + // amd64: `DIVSS 16\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), X[0-9]+` d /= b[i+4] return c, d } func storeTest(a []bool, v int, i int) { - // amd64: `BTL\t\$0,`,`SETCS\t4\([A-Z]+[0-9]*\)` + // amd64: `BTL \$0,`,`SETCS 4\([A-Z]+[0-9]*\)` a[4] = v&1 != 0 - // amd64: `BTL\t\$1,`,`SETCS\t3\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\)` + // amd64: `BTL \$1,`,`SETCS 3\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\)` a[3+i] = v&2 != 0 } func bitOps(p *[12]uint64) { - // amd64: `ORQ\t\$8, \(AX\)` + // amd64: `ORQ \$8, \(AX\)` p[0] |= 8 - // amd64: `ORQ\t\$1073741824, 8\(AX\)` + // amd64: `ORQ \$1073741824, 8\(AX\)` p[1] |= 1 << 30 - // amd64: `BTSQ\t\$31, 16\(AX\)` + // amd64: `BTSQ \$31, 16\(AX\)` p[2] |= 1 << 31 - // amd64: `BTSQ\t\$63, 24\(AX\)` + // amd64: `BTSQ \$63, 24\(AX\)` p[3] |= 1 << 63 - // amd64: `ANDQ\t\$-9, 32\(AX\)` + // amd64: `ANDQ \$-9, 32\(AX\)` p[4] &^= 8 - // amd64: `ANDQ\t\$-1073741825, 40\(AX\)` + // amd64: `ANDQ \$-1073741825, 40\(AX\)` p[5] &^= 1 << 30 - // amd64: `BTRQ\t\$31, 48\(AX\)` + // amd64: `BTRQ \$31, 48\(AX\)` p[6] &^= 1 << 31 - // amd64: `BTRQ\t\$63, 56\(AX\)` + // amd64: `BTRQ \$63, 56\(AX\)` p[7] &^= 1 << 63 - // amd64: `XORQ\t\$8, 64\(AX\)` + // amd64: `XORQ \$8, 64\(AX\)` p[8] ^= 8 - // amd64: `XORQ\t\$1073741824, 72\(AX\)` + // amd64: `XORQ \$1073741824, 72\(AX\)` p[9] ^= 1 << 30 - // amd64: `BTCQ\t\$31, 80\(AX\)` + // amd64: `BTCQ \$31, 80\(AX\)` p[10] ^= 1 << 31 - // amd64: `BTCQ\t\$63, 88\(AX\)` + // amd64: `BTCQ \$63, 88\(AX\)` p[11] ^= 1 << 63 } diff --git a/test/codegen/multiply.go b/test/codegen/multiply.go index 8c408cbfbab..166bc104d00 100644 --- a/test/codegen/multiply.go +++ b/test/codegen/multiply.go @@ -11,8 +11,8 @@ package codegen func m0(x int64) int64 { // amd64: "XORL" - // arm64: "MOVD\tZR" - // loong64: "MOVV\tR0" + // arm64: "MOVD ZR" + // loong64: "MOVV R0" return x * 0 } func m2(x int64) int64 { @@ -22,351 +22,351 @@ func m2(x int64) int64 { return x * 2 } func m3(x int64) int64 { - // amd64: "LEAQ\t.*[*]2" - // arm64: "ADD\tR[0-9]+<<1," - // loong64: "ALSLV\t[$]1," + // amd64: "LEAQ .*[*]2" + // arm64: "ADD R[0-9]+<<1," + // loong64: "ALSLV [$]1," return x * 3 } func m4(x int64) int64 { - // amd64: "SHLQ\t[$]2," - // arm64: "LSL\t[$]2," - // loong64: "SLLV\t[$]2," + // amd64: "SHLQ [$]2," + // arm64: "LSL [$]2," + // loong64: "SLLV [$]2," return x * 4 } func m5(x int64) int64 { - // amd64: "LEAQ\t.*[*]4" - // arm64: "ADD\tR[0-9]+<<2," - // loong64: "ALSLV\t[$]2," + // amd64: "LEAQ .*[*]4" + // arm64: "ADD R[0-9]+<<2," + // loong64: "ALSLV [$]2," return x * 5 } func m6(x int64) int64 { - // amd64: "LEAQ\t.*[*]1", "LEAQ\t.*[*]2" - // arm64: "ADD\tR[0-9]+,", "ADD\tR[0-9]+<<1," + // amd64: "LEAQ .*[*]1", 
"LEAQ .*[*]2" + // arm64: "ADD R[0-9]+,", "ADD R[0-9]+<<1," // loong64: "ADDVU", "ADDVU", "ADDVU" return x * 6 } func m7(x int64) int64 { - // amd64: "LEAQ\t.*[*]2" - // arm64: "LSL\t[$]3,", "SUB\tR[0-9]+," - // loong64: "ALSLV\t[$]1,", "ALSLV\t[$]1," + // amd64: "LEAQ .*[*]2" + // arm64: "LSL [$]3,", "SUB R[0-9]+," + // loong64: "ALSLV [$]1,", "ALSLV [$]1," return x * 7 } func m8(x int64) int64 { - // amd64: "SHLQ\t[$]3," - // arm64: "LSL\t[$]3," - // loong64: "SLLV\t[$]3," + // amd64: "SHLQ [$]3," + // arm64: "LSL [$]3," + // loong64: "SLLV [$]3," return x * 8 } func m9(x int64) int64 { - // amd64: "LEAQ\t.*[*]8" - // arm64: "ADD\tR[0-9]+<<3," - // loong64: "ALSLV\t[$]3," + // amd64: "LEAQ .*[*]8" + // arm64: "ADD R[0-9]+<<3," + // loong64: "ALSLV [$]3," return x * 9 } func m10(x int64) int64 { - // amd64: "LEAQ\t.*[*]1", "LEAQ\t.*[*]4" - // arm64: "ADD\tR[0-9]+,", "ADD\tR[0-9]+<<2," - // loong64: "ADDVU", "ALSLV\t[$]2," + // amd64: "LEAQ .*[*]1", "LEAQ .*[*]4" + // arm64: "ADD R[0-9]+,", "ADD R[0-9]+<<2," + // loong64: "ADDVU", "ALSLV [$]2," return x * 10 } func m11(x int64) int64 { - // amd64: "LEAQ\t.*[*]4", "LEAQ\t.*[*]2" - // arm64: "MOVD\t[$]11,", "MUL" - // loong64: "ALSLV\t[$]2,", "ALSLV\t[$]1," + // amd64: "LEAQ .*[*]4", "LEAQ .*[*]2" + // arm64: "MOVD [$]11,", "MUL" + // loong64: "ALSLV [$]2,", "ALSLV [$]1," return x * 11 } func m12(x int64) int64 { - // amd64: "LEAQ\t.*[*]2", "SHLQ\t[$]2," - // arm64: "LSL\t[$]2,", "ADD\tR[0-9]+<<1," - // loong64: "SLLV", "ALSLV\t[$]1," + // amd64: "LEAQ .*[*]2", "SHLQ [$]2," + // arm64: "LSL [$]2,", "ADD R[0-9]+<<1," + // loong64: "SLLV", "ALSLV [$]1," return x * 12 } func m13(x int64) int64 { - // amd64: "LEAQ\t.*[*]2", "LEAQ\t.*[*]4" - // arm64: "MOVD\t[$]13,", "MUL" - // loong64: "ALSLV\t[$]1,", "ALSLV\t[$]2," + // amd64: "LEAQ .*[*]2", "LEAQ .*[*]4" + // arm64: "MOVD [$]13,", "MUL" + // loong64: "ALSLV [$]1,", "ALSLV [$]2," return x * 13 } func m14(x int64) int64 { - // amd64: "IMUL3Q\t[$]14," - // arm64: "LSL\t[$]4,", "SUB\tR[0-9]+<<1," - // loong64: "ADDVU", "ALSLV\t[$]1", "ALSLV\t[$]2" + // amd64: "IMUL3Q [$]14," + // arm64: "LSL [$]4,", "SUB R[0-9]+<<1," + // loong64: "ADDVU", "ALSLV [$]1", "ALSLV [$]2" return x * 14 } func m15(x int64) int64 { - // amd64: "LEAQ\t.*[*]2", "LEAQ\t.*[*]4" - // arm64: "LSL\t[$]4,", "SUB\tR[0-9]+," - // loong64: "ALSLV\t[$]1,", "ALSLV\t[$]2," + // amd64: "LEAQ .*[*]2", "LEAQ .*[*]4" + // arm64: "LSL [$]4,", "SUB R[0-9]+," + // loong64: "ALSLV [$]1,", "ALSLV [$]2," return x * 15 } func m16(x int64) int64 { - // amd64: "SHLQ\t[$]4," - // arm64: "LSL\t[$]4," - // loong64: "SLLV\t[$]4," + // amd64: "SHLQ [$]4," + // arm64: "LSL [$]4," + // loong64: "SLLV [$]4," return x * 16 } func m17(x int64) int64 { - // amd64: "LEAQ\t.*[*]1", "LEAQ\t.*[*]8" - // arm64: "ADD\tR[0-9]+<<4," - // loong64: "ALSLV\t[$]" + // amd64: "LEAQ .*[*]1", "LEAQ .*[*]8" + // arm64: "ADD R[0-9]+<<4," + // loong64: "ALSLV [$]" return x * 17 } func m18(x int64) int64 { - // amd64: "LEAQ\t.*[*]1", "LEAQ\t.*[*]8" - // arm64: "ADD\tR[0-9]+,", "ADD\tR[0-9]+<<3," - // loong64: "ADDVU", "ALSLV\t[$]3," + // amd64: "LEAQ .*[*]1", "LEAQ .*[*]8" + // arm64: "ADD R[0-9]+,", "ADD R[0-9]+<<3," + // loong64: "ADDVU", "ALSLV [$]3," return x * 18 } func m19(x int64) int64 { - // amd64: "LEAQ\t.*[*]8", "LEAQ\t.*[*]2" - // arm64: "MOVD\t[$]19,", "MUL" - // loong64: "ALSLV\t[$]3,", "ALSLV\t[$]1," + // amd64: "LEAQ .*[*]8", "LEAQ .*[*]2" + // arm64: "MOVD [$]19,", "MUL" + // loong64: "ALSLV [$]3,", "ALSLV [$]1," return x * 19 } func m20(x int64) int64 { - // 
amd64: "LEAQ\t.*[*]4", "SHLQ\t[$]2," - // arm64: "LSL\t[$]2,", "ADD\tR[0-9]+<<2," - // loong64: "SLLV\t[$]2,", "ALSLV\t[$]2," + // amd64: "LEAQ .*[*]4", "SHLQ [$]2," + // arm64: "LSL [$]2,", "ADD R[0-9]+<<2," + // loong64: "SLLV [$]2,", "ALSLV [$]2," return x * 20 } func m21(x int64) int64 { - // amd64: "LEAQ\t.*[*]4", "LEAQ\t.*[*]4" - // arm64: "MOVD\t[$]21,", "MUL" - // loong64: "ALSLV\t[$]2,", "ALSLV\t[$]2," + // amd64: "LEAQ .*[*]4", "LEAQ .*[*]4" + // arm64: "MOVD [$]21,", "MUL" + // loong64: "ALSLV [$]2,", "ALSLV [$]2," return x * 21 } func m22(x int64) int64 { - // amd64: "IMUL3Q\t[$]22," - // arm64: "MOVD\t[$]22,", "MUL" - // loong64: "ADDVU", "ALSLV\t[$]2,", "ALSLV\t[$]2," + // amd64: "IMUL3Q [$]22," + // arm64: "MOVD [$]22,", "MUL" + // loong64: "ADDVU", "ALSLV [$]2,", "ALSLV [$]2," return x * 22 } func m23(x int64) int64 { - // amd64: "IMUL3Q\t[$]23," - // arm64: "MOVD\t[$]23,", "MUL" - // loong64: "ALSLV\t[$]1,", "SUBVU", "ALSLV\t[$]3," + // amd64: "IMUL3Q [$]23," + // arm64: "MOVD [$]23,", "MUL" + // loong64: "ALSLV [$]1,", "SUBVU", "ALSLV [$]3," return x * 23 } func m24(x int64) int64 { - // amd64: "LEAQ\t.*[*]2", "SHLQ\t[$]3," - // arm64: "LSL\t[$]3,", "ADD\tR[0-9]+<<1," - // loong64: "SLLV\t[$]3", "ALSLV\t[$]1," + // amd64: "LEAQ .*[*]2", "SHLQ [$]3," + // arm64: "LSL [$]3,", "ADD R[0-9]+<<1," + // loong64: "SLLV [$]3", "ALSLV [$]1," return x * 24 } func m25(x int64) int64 { - // amd64: "LEAQ\t.*[*]4", "LEAQ\t.*[*]4" - // arm64: "MOVD\t[$]25,", "MUL" - // loong64: "ALSLV\t[$]2,", "ALSLV\t[$]2," + // amd64: "LEAQ .*[*]4", "LEAQ .*[*]4" + // arm64: "MOVD [$]25,", "MUL" + // loong64: "ALSLV [$]2,", "ALSLV [$]2," return x * 25 } func m26(x int64) int64 { - // amd64: "IMUL3Q\t[$]26," - // arm64: "MOVD\t[$]26,", "MUL" - // loong64: "ADDVU", "ALSLV\t[$]1,", "ALSLV\t[$]3," + // amd64: "IMUL3Q [$]26," + // arm64: "MOVD [$]26,", "MUL" + // loong64: "ADDVU", "ALSLV [$]1,", "ALSLV [$]3," return x * 26 } func m27(x int64) int64 { - // amd64: "LEAQ\t.*[*]2", "LEAQ\t.*[*]8" - // arm64: "MOVD\t[$]27,", "MUL" - // loong64: "ALSLV\t[$]1,", "ALSLV\t[$]3," + // amd64: "LEAQ .*[*]2", "LEAQ .*[*]8" + // arm64: "MOVD [$]27,", "MUL" + // loong64: "ALSLV [$]1,", "ALSLV [$]3," return x * 27 } func m28(x int64) int64 { - // amd64: "IMUL3Q\t[$]28," - // arm64: "LSL\t[$]5, "SUB\tR[0-9]+<<2," - // loong64: "ALSLV\t[$]1,","SLLV\t[$]2,","ALSLV\t[$]3," + // amd64: "IMUL3Q [$]28," + // arm64: "LSL [$]5, "SUB R[0-9]+<<2," + // loong64: "ALSLV [$]1," "SLLV [$]2," "ALSLV [$]3," return x * 28 } func m29(x int64) int64 { - // amd64: "IMUL3Q\t[$]29," - // arm64: "MOVD\t[$]29,", "MUL" - // loong64: "ALSLV\t[$]1,","SLLV\t[$]5,","SUBVU" + // amd64: "IMUL3Q [$]29," + // arm64: "MOVD [$]29,", "MUL" + // loong64: "ALSLV [$]1," "SLLV [$]5," "SUBVU" return x * 29 } func m30(x int64) int64 { - // amd64: "IMUL3Q\t[$]30," - // arm64: "LSL\t[$]5,", "SUB\tR[0-9]+<<1," - // loong64: "ADDVU","SLLV\t[$]5,","SUBVU" + // amd64: "IMUL3Q [$]30," + // arm64: "LSL [$]5,", "SUB R[0-9]+<<1," + // loong64: "ADDVU" "SLLV [$]5," "SUBVU" return x * 30 } func m31(x int64) int64 { - // amd64: "SHLQ\t[$]5,", "SUBQ" - // arm64: "LSL\t[$]5,", "SUB\tR[0-9]+," - // loong64: "SLLV\t[$]5,","SUBVU" + // amd64: "SHLQ [$]5,", "SUBQ" + // arm64: "LSL [$]5,", "SUB R[0-9]+," + // loong64: "SLLV [$]5," "SUBVU" return x * 31 } func m32(x int64) int64 { - // amd64: "SHLQ\t[$]5," - // arm64: "LSL\t[$]5," - // loong64: "SLLV\t[$]5," + // amd64: "SHLQ [$]5," + // arm64: "LSL [$]5," + // loong64: "SLLV [$]5," return x * 32 } func m33(x int64) int64 { - // amd64: 
"SHLQ\t[$]2,", "LEAQ\t.*[*]8" - // arm64: "ADD\tR[0-9]+<<5," - // loong64: "ADDVU", "ALSLV\t[$]4," + // amd64: "SHLQ [$]2,", "LEAQ .*[*]8" + // arm64: "ADD R[0-9]+<<5," + // loong64: "ADDVU", "ALSLV [$]4," return x * 33 } func m34(x int64) int64 { - // amd64: "SHLQ\t[$]5,", "LEAQ\t.*[*]2" - // arm64: "ADD\tR[0-9]+,", "ADD\tR[0-9]+<<4," - // loong64: "ADDVU", "ALSLV\t[$]4," + // amd64: "SHLQ [$]5,", "LEAQ .*[*]2" + // arm64: "ADD R[0-9]+,", "ADD R[0-9]+<<4," + // loong64: "ADDVU", "ALSLV [$]4," return x * 34 } func m35(x int64) int64 { - // amd64: "IMUL3Q\t[$]35," - // arm64: "MOVD\t[$]35,", "MUL" - // loong64: "ALSLV\t[$]4,", "ALSLV\t[$]1," + // amd64: "IMUL3Q [$]35," + // arm64: "MOVD [$]35,", "MUL" + // loong64: "ALSLV [$]4,", "ALSLV [$]1," return x * 35 } func m36(x int64) int64 { - // amd64: "LEAQ\t.*[*]8", "SHLQ\t[$]2," - // arm64: "LSL\t[$]2,", "ADD\tR[0-9]+<<3," - // loong64: "SLLV\t[$]2,", "ALSLV\t[$]3," + // amd64: "LEAQ .*[*]8", "SHLQ [$]2," + // arm64: "LSL [$]2,", "ADD R[0-9]+<<3," + // loong64: "SLLV [$]2,", "ALSLV [$]3," return x * 36 } func m37(x int64) int64 { - // amd64: "LEAQ\t.*[*]8", "LEAQ\t.*[*]4" - // arm64: "MOVD\t[$]37,", "MUL" - // loong64: "ALSLV\t[$]3,", "ALSLV\t[$]2," + // amd64: "LEAQ .*[*]8", "LEAQ .*[*]4" + // arm64: "MOVD [$]37,", "MUL" + // loong64: "ALSLV [$]3,", "ALSLV [$]2," return x * 37 } func m38(x int64) int64 { - // amd64: "IMUL3Q\t[$]38," - // arm64: "MOVD\t[$]38,", "MUL" - // loong64: "ALSLV\t[$]3,", "ALSLV\t[$]2," + // amd64: "IMUL3Q [$]38," + // arm64: "MOVD [$]38,", "MUL" + // loong64: "ALSLV [$]3,", "ALSLV [$]2," return x * 38 } func m39(x int64) int64 { - // amd64: "IMUL3Q\t[$]39," - // arm64: "MOVD\t[$]39,", "MUL" - // loong64: "ALSLV\t[$]2,", "SUBVU", "ALSLV\t[$]3," + // amd64: "IMUL3Q [$]39," + // arm64: "MOVD [$]39,", "MUL" + // loong64: "ALSLV [$]2,", "SUBVU", "ALSLV [$]3," return x * 39 } func m40(x int64) int64 { - // amd64: "LEAQ\t.*[*]4", "SHLQ\t[$]3," - // arm64: "LSL\t[$]3,", "ADD\tR[0-9]+<<2," - // loong64: "SLLV\t[$]3,", "ALSLV\t[$]2," + // amd64: "LEAQ .*[*]4", "SHLQ [$]3," + // arm64: "LSL [$]3,", "ADD R[0-9]+<<2," + // loong64: "SLLV [$]3,", "ALSLV [$]2," return x * 40 } func mn1(x int64) int64 { - // amd64: "NEGQ\t" - // arm64: "NEG\tR[0-9]+," - // loong64: "SUBVU\tR[0-9], R0," + // amd64: "NEGQ " + // arm64: "NEG R[0-9]+," + // loong64: "SUBVU R[0-9], R0," return x * -1 } func mn2(x int64) int64 { // amd64: "NEGQ", "ADDQ" - // arm64: "NEG\tR[0-9]+<<1," - // loong64: "ADDVU","SUBVU\tR[0-9], R0," + // arm64: "NEG R[0-9]+<<1," + // loong64: "ADDVU" "SUBVU R[0-9], R0," return x * -2 } func mn3(x int64) int64 { - // amd64: "NEGQ", "LEAQ\t.*[*]2" - // arm64: "SUB\tR[0-9]+<<2," - // loong64: "SUBVU", "ALSLV\t[$]1," + // amd64: "NEGQ", "LEAQ .*[*]2" + // arm64: "SUB R[0-9]+<<2," + // loong64: "SUBVU", "ALSLV [$]1," return x * -3 } func mn4(x int64) int64 { - // amd64: "NEGQ", "SHLQ\t[$]2," - // arm64: "NEG\tR[0-9]+<<2," - // loong64: "SLLV\t[$]2,","SUBVU\tR[0-9], R0," + // amd64: "NEGQ", "SHLQ [$]2," + // arm64: "NEG R[0-9]+<<2," + // loong64: "SLLV [$]2," "SUBVU R[0-9], R0," return x * -4 } func mn5(x int64) int64 { - // amd64: "NEGQ", "LEAQ\t.*[*]4" - // arm64: "NEG\tR[0-9]+,", "ADD\tR[0-9]+<<2," - // loong64: "SUBVU", "ALSLV\t[$]2," + // amd64: "NEGQ", "LEAQ .*[*]4" + // arm64: "NEG R[0-9]+,", "ADD R[0-9]+<<2," + // loong64: "SUBVU", "ALSLV [$]2," return x * -5 } func mn6(x int64) int64 { - // amd64: "IMUL3Q\t[$]-6," - // arm64: "ADD\tR[0-9]+,", "SUB\tR[0-9]+<<2," - // loong64: "ADDVU", "SUBVU", "ALSLV\t[$]3," + // amd64: "IMUL3Q 
[$]-6," + // arm64: "ADD R[0-9]+,", "SUB R[0-9]+<<2," + // loong64: "ADDVU", "SUBVU", "ALSLV [$]3," return x * -6 } func mn7(x int64) int64 { - // amd64: "NEGQ", "LEAQ\t.*[*]8" - // arm64: "SUB\tR[0-9]+<<3," - // loong64: "SUBVU", "ALSLV\t[$]3," + // amd64: "NEGQ", "LEAQ .*[*]8" + // arm64: "SUB R[0-9]+<<3," + // loong64: "SUBVU", "ALSLV [$]3," return x * -7 } func mn8(x int64) int64 { - // amd64: "NEGQ", "SHLQ\t[$]3," - // arm64: "NEG\tR[0-9]+<<3," - // loong64: "SLLV\t[$]3","SUBVU\tR[0-9], R0," + // amd64: "NEGQ", "SHLQ [$]3," + // arm64: "NEG R[0-9]+<<3," + // loong64: "SLLV [$]3" "SUBVU R[0-9], R0," return x * -8 } func mn9(x int64) int64 { - // amd64: "NEGQ", "LEAQ\t.*[*]8" - // arm64: "NEG\tR[0-9]+,", "ADD\tR[0-9]+<<3," - // loong64: "SUBVU", "ALSLV\t[$]3," + // amd64: "NEGQ", "LEAQ .*[*]8" + // arm64: "NEG R[0-9]+,", "ADD R[0-9]+<<3," + // loong64: "SUBVU", "ALSLV [$]3," return x * -9 } func mn10(x int64) int64 { - // amd64: "IMUL3Q\t[$]-10," - // arm64: "MOVD\t[$]-10,", "MUL" - // loong64: "ADDVU", "ALSLV\t[$]3", "SUBVU" + // amd64: "IMUL3Q [$]-10," + // arm64: "MOVD [$]-10,", "MUL" + // loong64: "ADDVU", "ALSLV [$]3", "SUBVU" return x * -10 } func mn11(x int64) int64 { - // amd64: "IMUL3Q\t[$]-11," - // arm64: "MOVD\t[$]-11,", "MUL" - // loong64: "ALSLV\t[$]2,", "SUBVU", "ALSLV\t[$]4," + // amd64: "IMUL3Q [$]-11," + // arm64: "MOVD [$]-11,", "MUL" + // loong64: "ALSLV [$]2,", "SUBVU", "ALSLV [$]4," return x * -11 } func mn12(x int64) int64 { - // amd64: "IMUL3Q\t[$]-12," - // arm64: "LSL\t[$]2,", "SUB\tR[0-9]+<<2," - // loong64: "SUBVU", "SLLV\t[$]2,", "ALSLV\t[$]4," + // amd64: "IMUL3Q [$]-12," + // arm64: "LSL [$]2,", "SUB R[0-9]+<<2," + // loong64: "SUBVU", "SLLV [$]2,", "ALSLV [$]4," return x * -12 } func mn13(x int64) int64 { - // amd64: "IMUL3Q\t[$]-13," - // arm64: "MOVD\t[$]-13,", "MUL" - // loong64: "ALSLV\t[$]4,", "SLLV\t[$]2, ", "SUBVU" + // amd64: "IMUL3Q [$]-13," + // arm64: "MOVD [$]-13,", "MUL" + // loong64: "ALSLV [$]4,", "SLLV [$]2, ", "SUBVU" return x * -13 } func mn14(x int64) int64 { - // amd64: "IMUL3Q\t[$]-14," - // arm64: "ADD\tR[0-9]+,", "SUB\tR[0-9]+<<3," - // loong64: "ADDVU", "SUBVU", "ALSLV\t[$]4," + // amd64: "IMUL3Q [$]-14," + // arm64: "ADD R[0-9]+,", "SUB R[0-9]+<<3," + // loong64: "ADDVU", "SUBVU", "ALSLV [$]4," return x * -14 } func mn15(x int64) int64 { - // amd64: "SHLQ\t[$]4,", "SUBQ" - // arm64: "SUB\tR[0-9]+<<4," - // loong64: "SUBVU", "ALSLV\t[$]4," + // amd64: "SHLQ [$]4,", "SUBQ" + // arm64: "SUB R[0-9]+<<4," + // loong64: "SUBVU", "ALSLV [$]4," return x * -15 } func mn16(x int64) int64 { - // amd64: "NEGQ", "SHLQ\t[$]4," - // arm64: "NEG\tR[0-9]+<<4," - // loong64: "SLLV\t[$]4,","SUBVU\tR[0-9], R0," + // amd64: "NEGQ", "SHLQ [$]4," + // arm64: "NEG R[0-9]+<<4," + // loong64: "SLLV [$]4," "SUBVU R[0-9], R0," return x * -16 } func mn17(x int64) int64 { - // amd64: "IMUL3Q\t[$]-17," - // arm64: "NEG\tR[0-9]+,", "ADD\tR[0-9]+<<4," - // loong64: "SUBVU", "ALSLV\t[$]4," + // amd64: "IMUL3Q [$]-17," + // arm64: "NEG R[0-9]+,", "ADD R[0-9]+<<4," + // loong64: "SUBVU", "ALSLV [$]4," return x * -17 } func mn18(x int64) int64 { - // amd64: "IMUL3Q\t[$]-18," - // arm64: "MOVD\t[$]-18,", "MUL" - // loong64: "ADDVU", "ALSLV\t[$]4,", "SUBVU" + // amd64: "IMUL3Q [$]-18," + // arm64: "MOVD [$]-18,", "MUL" + // loong64: "ADDVU", "ALSLV [$]4,", "SUBVU" return x * -18 } func mn19(x int64) int64 { - // amd64: "IMUL3Q\t[$]-19," - // arm64: "MOVD\t[$]-19,", "MUL" - // loong64: "ALSLV\t[$]1,", "ALSLV\t[$]4,", "SUBVU" + // amd64: "IMUL3Q [$]-19," + // arm64: 
"MOVD [$]-19,", "MUL" + // loong64: "ALSLV [$]1,", "ALSLV [$]4,", "SUBVU" return x * -19 } func mn20(x int64) int64 { - // amd64: "IMUL3Q\t[$]-20," - // arm64: "MOVD\t[$]-20,", "MUL" - // loong64: "SLLV\t[$]2,", "ALSLV\t[$]4,", "SUBVU" + // amd64: "IMUL3Q [$]-20," + // arm64: "MOVD [$]-20,", "MUL" + // loong64: "SLLV [$]2,", "ALSLV [$]4,", "SUBVU" return x * -20 } diff --git a/test/codegen/noextend.go b/test/codegen/noextend.go index 193f75b092e..d9f1c77289b 100644 --- a/test/codegen/noextend.go +++ b/test/codegen/noextend.go @@ -26,7 +26,7 @@ var val8 [8]uint8 func set16(x8 int8, u8 *uint8, y8 int8, z8 uint8) { // Truncate not needed, load does sign/zero extend - // ppc64x:-"MOVBZ\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVBZ R\\d+,\\sR\\d+" val16[0] = uint16(*u8) // AND not needed due to size @@ -39,20 +39,20 @@ func set16(x8 int8, u8 *uint8, y8 int8, z8 uint8) { } func shiftidx(u8 *uint8, x16 *int16, u16 *uint16) { - // ppc64x:-"MOVBZ\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVBZ R\\d+,\\sR\\d+" val16[0] = uint16(sval16[*u8>>2]) - // ppc64x:-"MOVH\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVH R\\d+,\\sR\\d+" sval16[1] = int16(val16[*x16>>1]) - // ppc64x:-"MOVHZ\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVHZ R\\d+,\\sR\\d+" val16[1] = uint16(sval16[*u16>>2]) } func setnox(x8 int8, u8 *uint8, y8 *int8, z8 *uint8, x16 *int16, u16 *uint16, x32 *int32, u32 *uint32) { - // ppc64x:-"MOVBZ\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVBZ R\\d+,\\sR\\d+" val16[0] = uint16(*u8) // AND not needed due to size @@ -62,65 +62,65 @@ func setnox(x8 int8, u8 *uint8, y8 *int8, z8 *uint8, x16 *int16, u16 *uint16, x3 // ppc64x:-"ANDCC" val16[1] = 255 & uint16(*u8+*z8) - // ppc64x:-"MOVH\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVH R\\d+,\\sR\\d+" sval32[1] = int32(*x16) - // ppc64x:-"MOVBZ\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVBZ R\\d+,\\sR\\d+" val32[0] = uint32(*u8) - // ppc64x:-"MOVHZ\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVHZ R\\d+,\\sR\\d+" val32[1] = uint32(*u16) - // ppc64x:-"MOVH\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVH R\\d+,\\sR\\d+" sval64[1] = int64(*x16) - // ppc64x:-"MOVW\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVW R\\d+,\\sR\\d+" sval64[2] = int64(*x32) - // ppc64x:-"MOVBZ\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVBZ R\\d+,\\sR\\d+" val64[0] = uint64(*u8) - // ppc64x:-"MOVHZ\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVHZ R\\d+,\\sR\\d+" val64[1] = uint64(*u16) - // ppc64x:-"MOVWZ\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVWZ R\\d+,\\sR\\d+" val64[2] = uint64(*u32) } func cmp16(u8 *uint8, x32 *int32, u32 *uint32, x64 *int64, u64 *uint64) bool { - // ppc64x:-"MOVBZ\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVBZ R\\d+,\\sR\\d+" if uint16(*u8) == val16[0] { return true } - // ppc64x:-"MOVHZ\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVHZ R\\d+,\\sR\\d+" if uint16(*u32>>16) == val16[0] { return true } - // ppc64x:-"MOVHZ\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVHZ R\\d+,\\sR\\d+" if uint16(*u64>>48) == val16[0] { return true } // Verify the truncates are using the correct sign. 
- // ppc64x:-"MOVHZ\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVHZ R\\d+,\\sR\\d+" if int16(*x32) == sval16[0] { return true } - // ppc64x:-"MOVH\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVH R\\d+,\\sR\\d+" if uint16(*u32) == val16[0] { return true } - // ppc64x:-"MOVHZ\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVHZ R\\d+,\\sR\\d+" if int16(*x64) == sval16[0] { return true } - // ppc64x:-"MOVH\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVH R\\d+,\\sR\\d+" if uint16(*u64) == val16[0] { return true } @@ -130,28 +130,28 @@ func cmp16(u8 *uint8, x32 *int32, u32 *uint32, x64 *int64, u64 *uint64) bool { func cmp32(u8 *uint8, x16 *int16, u16 *uint16, x64 *int64, u64 *uint64) bool { - // ppc64x:-"MOVBZ\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVBZ R\\d+,\\sR\\d+" if uint32(*u8) == val32[0] { return true } - // ppc64x:-"MOVH\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVH R\\d+,\\sR\\d+" if int32(*x16) == sval32[0] { return true } - // ppc64x:-"MOVHZ\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVHZ R\\d+,\\sR\\d+" if uint32(*u16) == val32[0] { return true } // Verify the truncates are using the correct sign. - // ppc64x:-"MOVWZ\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVWZ R\\d+,\\sR\\d+" if int32(*x64) == sval32[0] { return true } - // ppc64x:-"MOVW\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVW R\\d+,\\sR\\d+" if uint32(*u64) == val32[0] { return true } @@ -161,27 +161,27 @@ func cmp32(u8 *uint8, x16 *int16, u16 *uint16, x64 *int64, u64 *uint64) bool { func cmp64(u8 *uint8, x16 *int16, u16 *uint16, x32 *int32, u32 *uint32) bool { - // ppc64x:-"MOVBZ\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVBZ R\\d+,\\sR\\d+" if uint64(*u8) == val64[0] { return true } - // ppc64x:-"MOVH\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVH R\\d+,\\sR\\d+" if int64(*x16) == sval64[0] { return true } - // ppc64x:-"MOVHZ\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVHZ R\\d+,\\sR\\d+" if uint64(*u16) == val64[0] { return true } - // ppc64x:-"MOVW\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVW R\\d+,\\sR\\d+" if int64(*x32) == sval64[0] { return true } - // ppc64x:-"MOVWZ\tR\\d+,\\sR\\d+" + // ppc64x:-"MOVWZ R\\d+,\\sR\\d+" if uint64(*u32) == val64[0] { return true } @@ -193,40 +193,40 @@ func cmp64(u8 *uint8, x16 *int16, u16 *uint16, x32 *int32, u32 *uint32) bool { func noUnsignEXT(t1, t2, t3, t4 uint32, k int64) uint64 { var ret uint64 - // arm64:"RORW",-"MOVWU" + // arm64:"RORW" -"MOVWU" ret += uint64(bits.RotateLeft32(t1, 7)) - // arm64:"MULW",-"MOVWU" + // arm64:"MULW" -"MOVWU" ret *= uint64(t1 * t2) - // arm64:"MNEGW",-"MOVWU" + // arm64:"MNEGW" -"MOVWU" ret += uint64(-t1 * t3) - // arm64:"UDIVW",-"MOVWU" + // arm64:"UDIVW" -"MOVWU" ret += uint64(t1 / t4) // arm64:-"MOVWU" ret += uint64(t2 % t3) - // arm64:"MSUBW",-"MOVWU" + // arm64:"MSUBW" -"MOVWU" ret += uint64(t1 - t2*t3) - // arm64:"MADDW",-"MOVWU" + // arm64:"MADDW" -"MOVWU" ret += uint64(t3*t4 + t2) - // arm64:"REVW",-"MOVWU" + // arm64:"REVW" -"MOVWU" ret += uint64(bits.ReverseBytes32(t1)) - // arm64:"RBITW",-"MOVWU" + // arm64:"RBITW" -"MOVWU" ret += uint64(bits.Reverse32(t1)) - // arm64:"CLZW",-"MOVWU" + // arm64:"CLZW" -"MOVWU" ret += uint64(bits.LeadingZeros32(t1)) - // arm64:"REV16W",-"MOVWU" + // arm64:"REV16W" -"MOVWU" ret += uint64(((t1 & 0xff00ff00) >> 8) | ((t1 & 0x00ff00ff) << 8)) - // arm64:"EXTRW",-"MOVWU" + // arm64:"EXTRW" -"MOVWU" ret += uint64((t1 << 25) | (t2 >> 7)) return ret diff --git a/test/codegen/reflect_type.go b/test/codegen/reflect_type.go index b92a9567f14..6b3eae96f5b 100644 --- a/test/codegen/reflect_type.go +++ b/test/codegen/reflect_type.go @@ -9,13 +9,13 @@ package codegen import "reflect" func intPtrTypeSize() uintptr { - // amd64:"MOVL\t[$]8,",-"CALL" - // 
arm64:"MOVD\t[$]8,",-"CALL" + // amd64:"MOVL [$]8," -"CALL" + // arm64:"MOVD [$]8," -"CALL" return reflect.TypeFor[*int]().Size() } func intPtrTypeKind() reflect.Kind { - // amd64:"MOVL\t[$]22,",-"CALL" - // arm64:"MOVD\t[$]22,",-"CALL" + // amd64:"MOVL [$]22," -"CALL" + // arm64:"MOVD [$]22," -"CALL" return reflect.TypeFor[*int]().Kind() } diff --git a/test/codegen/regabi_regalloc.go b/test/codegen/regabi_regalloc.go index a7b7bd52bc8..8ac6a3da9c7 100644 --- a/test/codegen/regabi_regalloc.go +++ b/test/codegen/regabi_regalloc.go @@ -8,13 +8,13 @@ package codegen //go:registerparams func f1(a, b int) { - // amd64:"MOVQ\tBX, CX", "MOVQ\tAX, BX", "MOVL\t\\$1, AX", -"MOVQ\t.*DX" + // amd64:"MOVQ BX, CX", "MOVQ AX, BX", "MOVL [$]1, AX", -"MOVQ .*DX" g(1, a, b) } //go:registerparams func f2(a, b int) { - // amd64:"MOVQ\tBX, AX", "MOVQ\t[AB]X, CX", -"MOVQ\t.*, BX" + // amd64:"MOVQ BX, AX", "MOVQ [AB]X, CX", -"MOVQ .*, BX" g(b, b, b) } diff --git a/test/codegen/retpoline.go b/test/codegen/retpoline.go index 0e8f661ec99..e733a49d8aa 100644 --- a/test/codegen/retpoline.go +++ b/test/codegen/retpoline.go @@ -5,12 +5,12 @@ package codegen func CallFunc(f func()) { - // amd64:`CALL\truntime.retpoline` + // amd64:`CALL runtime.retpoline` f() } func CallInterface(x interface{ M() }) { - // amd64:`CALL\truntime.retpoline` + // amd64:`CALL runtime.retpoline` x.M() } diff --git a/test/codegen/rotate.go b/test/codegen/rotate.go index 121ce4cc0a5..17bd27a4d02 100644 --- a/test/codegen/rotate.go +++ b/test/codegen/rotate.go @@ -15,36 +15,36 @@ import "math/bits" func rot64(x uint64) uint64 { var a uint64 - // amd64:"ROLQ\t[$]7" - // ppc64x:"ROTL\t[$]7" - // loong64: "ROTRV\t[$]57" - // riscv64: "RORI\t[$]57" + // amd64:"ROLQ [$]7" + // ppc64x:"ROTL [$]7" + // loong64: "ROTRV [$]57" + // riscv64: "RORI [$]57" a += x<<7 | x>>57 - // amd64:"ROLQ\t[$]8" - // arm64:"ROR\t[$]56" - // s390x:"RISBGZ\t[$]0, [$]63, [$]8, " - // ppc64x:"ROTL\t[$]8" - // loong64: "ROTRV\t[$]56" - // riscv64: "RORI\t[$]56" + // amd64:"ROLQ [$]8" + // arm64:"ROR [$]56" + // s390x:"RISBGZ [$]0, [$]63, [$]8, " + // ppc64x:"ROTL [$]8" + // loong64: "ROTRV [$]56" + // riscv64: "RORI [$]56" a += x<<8 + x>>56 - // amd64:"ROLQ\t[$]9" - // arm64:"ROR\t[$]55" - // s390x:"RISBGZ\t[$]0, [$]63, [$]9, " - // ppc64x:"ROTL\t[$]9" - // loong64: "ROTRV\t[$]55" - // riscv64: "RORI\t[$]55" + // amd64:"ROLQ [$]9" + // arm64:"ROR [$]55" + // s390x:"RISBGZ [$]0, [$]63, [$]9, " + // ppc64x:"ROTL [$]9" + // loong64: "ROTRV [$]55" + // riscv64: "RORI [$]55" a += x<<9 ^ x>>55 - // amd64:"ROLQ\t[$]10" - // arm64:"ROR\t[$]54" - // s390x:"RISBGZ\t[$]0, [$]63, [$]10, " - // ppc64x:"ROTL\t[$]10" - // arm64:"ROR\t[$]54" - // s390x:"RISBGZ\t[$]0, [$]63, [$]10, " - // loong64: "ROTRV\t[$]54" - // riscv64: "RORI\t[$]54" + // amd64:"ROLQ [$]10" + // arm64:"ROR [$]54" + // s390x:"RISBGZ [$]0, [$]63, [$]10, " + // ppc64x:"ROTL [$]10" + // arm64:"ROR [$]54" + // s390x:"RISBGZ [$]0, [$]63, [$]10, " + // loong64: "ROTRV [$]54" + // riscv64: "RORI [$]54" a += bits.RotateLeft64(x, 10) return a @@ -53,40 +53,40 @@ func rot64(x uint64) uint64 { func rot32(x uint32) uint32 { var a uint32 - // amd64:"ROLL\t[$]7" - // arm:"MOVW\tR\\d+@>25" - // ppc64x:"ROTLW\t[$]7" - // loong64: "ROTR\t[$]25" - // riscv64: "RORIW\t[$]25" + // amd64:"ROLL [$]7" + // arm:"MOVW R\\d+@>25" + // ppc64x:"ROTLW [$]7" + // loong64: "ROTR [$]25" + // riscv64: "RORIW [$]25" a += x<<7 | x>>25 - // amd64:`ROLL\t[$]8` - // arm:"MOVW\tR\\d+@>24" - // arm64:"RORW\t[$]24" - // s390x:"RLL\t[$]8" - // 
ppc64x:"ROTLW\t[$]8" - // loong64: "ROTR\t[$]24" - // riscv64: "RORIW\t[$]24" + // amd64:`ROLL [$]8` + // arm:"MOVW R\\d+@>24" + // arm64:"RORW [$]24" + // s390x:"RLL [$]8" + // ppc64x:"ROTLW [$]8" + // loong64: "ROTR [$]24" + // riscv64: "RORIW [$]24" a += x<<8 + x>>24 - // amd64:"ROLL\t[$]9" - // arm:"MOVW\tR\\d+@>23" - // arm64:"RORW\t[$]23" - // s390x:"RLL\t[$]9" - // ppc64x:"ROTLW\t[$]9" - // loong64: "ROTR\t[$]23" - // riscv64: "RORIW\t[$]23" + // amd64:"ROLL [$]9" + // arm:"MOVW R\\d+@>23" + // arm64:"RORW [$]23" + // s390x:"RLL [$]9" + // ppc64x:"ROTLW [$]9" + // loong64: "ROTR [$]23" + // riscv64: "RORIW [$]23" a += x<<9 ^ x>>23 - // amd64:"ROLL\t[$]10" - // arm:"MOVW\tR\\d+@>22" - // arm64:"RORW\t[$]22" - // s390x:"RLL\t[$]10" - // ppc64x:"ROTLW\t[$]10" - // arm64:"RORW\t[$]22" - // s390x:"RLL\t[$]10" - // loong64: "ROTR\t[$]22" - // riscv64: "RORIW\t[$]22" + // amd64:"ROLL [$]10" + // arm:"MOVW R\\d+@>22" + // arm64:"RORW [$]22" + // s390x:"RLL [$]10" + // ppc64x:"ROTLW [$]10" + // arm64:"RORW [$]22" + // s390x:"RLL [$]10" + // loong64: "ROTR [$]22" + // riscv64: "RORIW [$]22" a += bits.RotateLeft32(x, 10) return a @@ -95,16 +95,16 @@ func rot32(x uint32) uint32 { func rot16(x uint16) uint16 { var a uint16 - // amd64:"ROLW\t[$]7" - // riscv64: "OR","SLLI","SRLI",-"AND" + // amd64:"ROLW [$]7" + // riscv64: "OR" "SLLI" "SRLI" -"AND" a += x<<7 | x>>9 - // amd64:`ROLW\t[$]8` - // riscv64: "OR","SLLI","SRLI",-"AND" + // amd64:`ROLW [$]8` + // riscv64: "OR" "SLLI" "SRLI" -"AND" a += x<<8 + x>>8 - // amd64:"ROLW\t[$]9" - // riscv64: "OR","SLLI","SRLI",-"AND" + // amd64:"ROLW [$]9" + // riscv64: "OR" "SLLI" "SRLI" -"AND" a += x<<9 ^ x>>7 return a @@ -113,16 +113,16 @@ func rot16(x uint16) uint16 { func rot8(x uint8) uint8 { var a uint8 - // amd64:"ROLB\t[$]5" - // riscv64: "OR","SLLI","SRLI",-"AND" + // amd64:"ROLB [$]5" + // riscv64: "OR" "SLLI" "SRLI" -"AND" a += x<<5 | x>>3 - // amd64:`ROLB\t[$]6` - // riscv64: "OR","SLLI","SRLI",-"AND" + // amd64:`ROLB [$]6` + // riscv64: "OR" "SLLI" "SRLI" -"AND" a += x<<6 + x>>2 - // amd64:"ROLB\t[$]7" - // riscv64: "OR","SLLI","SRLI",-"AND" + // amd64:"ROLB [$]7" + // riscv64: "OR" "SLLI" "SRLI" -"AND" a += x<<7 ^ x>>1 return a @@ -137,18 +137,18 @@ func rot64nc(x uint64, z uint) uint64 { z &= 63 - // amd64:"ROLQ",-"AND" - // arm64:"ROR","NEG",-"AND" - // ppc64x:"ROTL",-"NEG",-"AND" + // amd64:"ROLQ" -"AND" + // arm64:"ROR" "NEG" -"AND" + // ppc64x:"ROTL" -"NEG" -"AND" // loong64: "ROTRV", -"AND" - // riscv64: "ROL",-"AND" + // riscv64: "ROL" -"AND" a += x<>(64-z) - // amd64:"RORQ",-"AND" - // arm64:"ROR",-"NEG",-"AND" - // ppc64x:"ROTL","NEG",-"AND" + // amd64:"RORQ" -"AND" + // arm64:"ROR" -"NEG" -"AND" + // ppc64x:"ROTL" "NEG" -"AND" // loong64: "ROTRV", -"AND" - // riscv64: "ROR",-"AND" + // riscv64: "ROR" -"AND" a += x>>z | x<<(64-z) return a @@ -159,18 +159,18 @@ func rot32nc(x uint32, z uint) uint32 { z &= 31 - // amd64:"ROLL",-"AND" - // arm64:"ROR","NEG",-"AND" - // ppc64x:"ROTLW",-"NEG",-"AND" + // amd64:"ROLL" -"AND" + // arm64:"ROR" "NEG" -"AND" + // ppc64x:"ROTLW" -"NEG" -"AND" // loong64: "ROTR", -"AND" - // riscv64: "ROLW",-"AND" + // riscv64: "ROLW" -"AND" a += x<>(32-z) - // amd64:"RORL",-"AND" - // arm64:"ROR",-"NEG",-"AND" - // ppc64x:"ROTLW","NEG",-"AND" + // amd64:"RORL" -"AND" + // arm64:"ROR" -"NEG" -"AND" + // ppc64x:"ROTLW" "NEG" -"AND" // loong64: "ROTR", -"AND" - // riscv64: "RORW",-"AND" + // riscv64: "RORW" -"AND" a += x>>z | x<<(32-z) return a @@ -181,12 +181,12 @@ func rot16nc(x uint16, z uint) uint16 { z &= 15 - // 
amd64:"ROLW",-"ANDQ" - // riscv64: "OR","SLL","SRL",-"AND\t" + // amd64:"ROLW" -"ANDQ" + // riscv64: "OR" "SLL" "SRL" -"AND " a += x<>(16-z) - // amd64:"RORW",-"ANDQ" - // riscv64: "OR","SLL","SRL",-"AND\t" + // amd64:"RORW" -"ANDQ" + // riscv64: "OR" "SLL" "SRL" -"AND " a += x>>z | x<<(16-z) return a @@ -197,12 +197,12 @@ func rot8nc(x uint8, z uint) uint8 { z &= 7 - // amd64:"ROLB",-"ANDQ" - // riscv64: "OR","SLL","SRL",-"AND\t" + // amd64:"ROLB" -"ANDQ" + // riscv64: "OR" "SLL" "SRL" -"AND " a += x<>(8-z) - // amd64:"RORB",-"ANDQ" - // riscv64: "OR","SLL","SRL",-"AND\t" + // amd64:"RORB" -"ANDQ" + // riscv64: "OR" "SLL" "SRL" -"AND " a += x>>z | x<<(8-z) return a @@ -210,14 +210,14 @@ func rot8nc(x uint8, z uint) uint8 { // Issue 18254: rotate after inlining func f32(x uint32) uint32 { - // amd64:"ROLL\t[$]7" + // amd64:"ROLL [$]7" return rot32nc(x, 7) } func doubleRotate(x uint64) uint64 { x = (x << 5) | (x >> 59) - // amd64:"ROLQ\t[$]15" - // arm64:"ROR\t[$]49" + // amd64:"ROLQ [$]15" + // arm64:"ROR [$]49" x = (x << 10) | (x >> 54) return x } @@ -229,51 +229,51 @@ func doubleRotate(x uint64) uint64 { func checkMaskedRotate32(a []uint32, r int) { i := 0 - // ppc64x: "RLWNM\t[$]16, R[0-9]+, [$]8, [$]15, R[0-9]+" + // ppc64x: "RLWNM [$]16, R[0-9]+, [$]8, [$]15, R[0-9]+" a[i] = bits.RotateLeft32(a[i], 16) & 0xFF0000 i++ - // ppc64x: "RLWNM\t[$]16, R[0-9]+, [$]8, [$]15, R[0-9]+" + // ppc64x: "RLWNM [$]16, R[0-9]+, [$]8, [$]15, R[0-9]+" a[i] = bits.RotateLeft32(a[i]&0xFF, 16) i++ - // ppc64x: "RLWNM\t[$]4, R[0-9]+, [$]20, [$]27, R[0-9]+" + // ppc64x: "RLWNM [$]4, R[0-9]+, [$]20, [$]27, R[0-9]+" a[i] = bits.RotateLeft32(a[i], 4) & 0xFF0 i++ - // ppc64x: "RLWNM\t[$]16, R[0-9]+, [$]24, [$]31, R[0-9]+" + // ppc64x: "RLWNM [$]16, R[0-9]+, [$]24, [$]31, R[0-9]+" a[i] = bits.RotateLeft32(a[i]&0xFF0000, 16) i++ - // ppc64x: "RLWNM\tR[0-9]+, R[0-9]+, [$]8, [$]15, R[0-9]+" + // ppc64x: "RLWNM R[0-9]+, R[0-9]+, [$]8, [$]15, R[0-9]+" a[i] = bits.RotateLeft32(a[i], r) & 0xFF0000 i++ - // ppc64x: "RLWNM\tR[0-9]+, R[0-9]+, [$]16, [$]23, R[0-9]+" + // ppc64x: "RLWNM R[0-9]+, R[0-9]+, [$]16, [$]23, R[0-9]+" a[i] = bits.RotateLeft32(a[i], r) & 0xFF00 i++ - // ppc64x: "RLWNM\tR[0-9]+, R[0-9]+, [$]20, [$]11, R[0-9]+" + // ppc64x: "RLWNM R[0-9]+, R[0-9]+, [$]20, [$]11, R[0-9]+" a[i] = bits.RotateLeft32(a[i], r) & 0xFFF00FFF i++ - // ppc64x: "RLWNM\t[$]4, R[0-9]+, [$]20, [$]11, R[0-9]+" + // ppc64x: "RLWNM [$]4, R[0-9]+, [$]20, [$]11, R[0-9]+" a[i] = bits.RotateLeft32(a[i], 4) & 0xFFF00FFF i++ } // combined arithmetic and rotate on arm64 func checkArithmeticWithRotate(a *[1000]uint64) { - // arm64: "AND\tR[0-9]+@>51, R[0-9]+, R[0-9]+" + // arm64: "AND R[0-9]+@>51, R[0-9]+, R[0-9]+" a[2] = a[1] & bits.RotateLeft64(a[0], 13) - // arm64: "ORR\tR[0-9]+@>51, R[0-9]+, R[0-9]+" + // arm64: "ORR R[0-9]+@>51, R[0-9]+, R[0-9]+" a[5] = a[4] | bits.RotateLeft64(a[3], 13) - // arm64: "EOR\tR[0-9]+@>51, R[0-9]+, R[0-9]+" + // arm64: "EOR R[0-9]+@>51, R[0-9]+, R[0-9]+" a[8] = a[7] ^ bits.RotateLeft64(a[6], 13) - // arm64: "MVN\tR[0-9]+@>51, R[0-9]+" + // arm64: "MVN R[0-9]+@>51, R[0-9]+" a[10] = ^bits.RotateLeft64(a[9], 13) - // arm64: "BIC\tR[0-9]+@>51, R[0-9]+, R[0-9]+" + // arm64: "BIC R[0-9]+@>51, R[0-9]+, R[0-9]+" a[13] = a[12] &^ bits.RotateLeft64(a[11], 13) - // arm64: "EON\tR[0-9]+@>51, R[0-9]+, R[0-9]+" + // arm64: "EON R[0-9]+@>51, R[0-9]+, R[0-9]+" a[16] = a[15] ^ ^bits.RotateLeft64(a[14], 13) - // arm64: "ORN\tR[0-9]+@>51, R[0-9]+, R[0-9]+" + // arm64: "ORN R[0-9]+@>51, R[0-9]+, R[0-9]+" a[19] = a[18] | 
^bits.RotateLeft64(a[17], 13) - // arm64: "TST\tR[0-9]+@>51, R[0-9]+" + // arm64: "TST R[0-9]+@>51, R[0-9]+" if a[18]&bits.RotateLeft64(a[19], 13) == 0 { a[20] = 1 } diff --git a/test/codegen/schedule.go b/test/codegen/schedule.go index aafffd817ba..bb8a81af78e 100644 --- a/test/codegen/schedule.go +++ b/test/codegen/schedule.go @@ -8,8 +8,8 @@ package codegen func f(n int) int { r := 0 - // arm64:-"MOVD\t R" - // amd64:-"LEAQ","INCQ" + // arm64:-"MOVD R" + // amd64:-"LEAQ" "INCQ" for i := range n { r += i } diff --git a/test/codegen/select.go b/test/codegen/select.go index 82f6d1c7efe..46532d4012a 100644 --- a/test/codegen/select.go +++ b/test/codegen/select.go @@ -10,7 +10,7 @@ func f() { ch1 := make(chan int) ch2 := make(chan int) for { - // amd64:-`MOVQ\t[$]0, command-line-arguments..autotmp_3` + // amd64:-`MOVQ [$]0, command-line-arguments..autotmp_3` select { case <-ch1: case <-ch2: diff --git a/test/codegen/shift.go b/test/codegen/shift.go index 4b0885a4ddd..1877247af4d 100644 --- a/test/codegen/shift.go +++ b/test/codegen/shift.go @@ -13,57 +13,57 @@ package codegen func lshConst64x64(v int64) int64 { // loong64:"SLLV" // ppc64x:"SLD" - // riscv64:"SLLI",-"AND",-"SLTIU" + // riscv64:"SLLI" -"AND" -"SLTIU" return v << uint64(33) } func rshConst64Ux64(v uint64) uint64 { // loong64:"SRLV" // ppc64x:"SRD" - // riscv64:"SRLI\t",-"AND",-"SLTIU" + // riscv64:"SRLI " -"AND" -"SLTIU" return v >> uint64(33) } func rshConst64Ux64Overflow32(v uint32) uint64 { - // loong64:"MOVV\tR0,",-"SRL\t" - // riscv64:"MOV\t\\$0,",-"SRL" + // loong64:"MOVV R0," -"SRL " + // riscv64:"MOV [$]0," -"SRL" return uint64(v) >> 32 } func rshConst64Ux64Overflow16(v uint16) uint64 { - // loong64:"MOVV\tR0,",-"SRLV" - // riscv64:"MOV\t\\$0,",-"SRL" + // loong64:"MOVV R0," -"SRLV" + // riscv64:"MOV [$]0," -"SRL" return uint64(v) >> 16 } func rshConst64Ux64Overflow8(v uint8) uint64 { - // loong64:"MOVV\tR0,",-"SRLV" - // riscv64:"MOV\t\\$0,",-"SRL" + // loong64:"MOVV R0," -"SRLV" + // riscv64:"MOV [$]0," -"SRL" return uint64(v) >> 8 } func rshConst64x64(v int64) int64 { // loong64:"SRAV" // ppc64x:"SRAD" - // riscv64:"SRAI\t",-"OR",-"SLTIU" + // riscv64:"SRAI " -"OR" -"SLTIU" return v >> uint64(33) } func rshConst64x64Overflow32(v int32) int64 { - // loong64:"SRA\t\\$31" - // riscv64:"SRAIW",-"SLLI",-"SRAI\t" + // loong64:"SRA [$]31" + // riscv64:"SRAIW" -"SLLI" -"SRAI " return int64(v) >> 32 } func rshConst64x64Overflow16(v int16) int64 { - // loong64:"SLLV\t\\$48","SRAV\t\\$63" - // riscv64:"SLLI","SRAI",-"SRAIW" + // loong64:"SLLV [$]48" "SRAV [$]63" + // riscv64:"SLLI" "SRAI" -"SRAIW" return int64(v) >> 16 } func rshConst64x64Overflow8(v int8) int64 { - // loong64:"SLLV\t\\$56","SRAV\t\\$63" - // riscv64:"SLLI","SRAI",-"SRAIW" + // loong64:"SLLV [$]56" "SRAV [$]63" + // riscv64:"SLLI" "SRAI" -"SRAIW" return int64(v) >> 8 } @@ -78,84 +78,84 @@ func lshConst64x1(v int64) int64 { } func lshConst32x64(v int32) int32 { - // loong64:"SLL\t" + // loong64:"SLL " // ppc64x:"SLW" - // riscv64:"SLLI",-"AND",-"SLTIU", -"MOVW" + // riscv64:"SLLI" -"AND" -"SLTIU", -"MOVW" return v << uint64(29) } func rshConst32Ux64(v uint32) uint32 { - // loong64:"SRL\t" + // loong64:"SRL " // ppc64x:"SRW" - // riscv64:"SRLIW",-"AND",-"SLTIU", -"MOVW" + // riscv64:"SRLIW" -"AND" -"SLTIU", -"MOVW" return v >> uint64(29) } func rshConst32x64(v int32) int32 { - // loong64:"SRA\t" + // loong64:"SRA " // ppc64x:"SRAW" - // riscv64:"SRAIW",-"OR",-"SLTIU", -"MOVW" + // riscv64:"SRAIW" -"OR" -"SLTIU", -"MOVW" return v >> uint64(29) } func 
lshConst64x32(v int64) int64 { // loong64:"SLLV" // ppc64x:"SLD" - // riscv64:"SLLI",-"AND",-"SLTIU" + // riscv64:"SLLI" -"AND" -"SLTIU" return v << uint32(33) } func rshConst64Ux32(v uint64) uint64 { // loong64:"SRLV" // ppc64x:"SRD" - // riscv64:"SRLI\t",-"AND",-"SLTIU" + // riscv64:"SRLI " -"AND" -"SLTIU" return v >> uint32(33) } func rshConst64x32(v int64) int64 { // loong64:"SRAV" // ppc64x:"SRAD" - // riscv64:"SRAI\t",-"OR",-"SLTIU" + // riscv64:"SRAI " -"OR" -"SLTIU" return v >> uint32(33) } func lshConst32x1Add(x int32) int32 { - // amd64:"SHLL\t[$]2" - // loong64:"SLL\t[$]2" - // riscv64:"SLLI\t[$]2" + // amd64:"SHLL [$]2" + // loong64:"SLL [$]2" + // riscv64:"SLLI [$]2" return (x + x) << 1 } func lshConst64x1Add(x int64) int64 { - // amd64:"SHLQ\t[$]2" - // loong64:"SLLV\t[$]2" - // riscv64:"SLLI\t[$]2" + // amd64:"SHLQ [$]2" + // loong64:"SLLV [$]2" + // riscv64:"SLLI [$]2" return (x + x) << 1 } func lshConst32x2Add(x int32) int32 { - // amd64:"SHLL\t[$]3" - // loong64:"SLL\t[$]3" - // riscv64:"SLLI\t[$]3" + // amd64:"SHLL [$]3" + // loong64:"SLL [$]3" + // riscv64:"SLLI [$]3" return (x + x) << 2 } func lshConst64x2Add(x int64) int64 { - // amd64:"SHLQ\t[$]3" - // loong64:"SLLV\t[$]3" - // riscv64:"SLLI\t[$]3" + // amd64:"SHLQ [$]3" + // loong64:"SLLV [$]3" + // riscv64:"SLLI [$]3" return (x + x) << 2 } func lshConst32x31Add(x int32) int32 { - // loong64:-"SLL\t","MOVV\tR0" - // riscv64:-"SLLI","MOV\t[$]0" + // loong64:-"SLL " "MOVV R0" + // riscv64:-"SLLI" "MOV [$]0" return (x + x) << 31 } func lshConst64x63Add(x int64) int64 { - // loong64:-"SLLV","MOVV\tR0" - // riscv64:-"SLLI","MOV\t[$]0" + // loong64:-"SLLV" "MOVV R0" + // riscv64:-"SLLI" "MOV [$]0" return (x + x) << 63 } @@ -164,121 +164,121 @@ func lshConst64x63Add(x int64) int64 { // ------------------ // func lshMask64x64(v int64, s uint64) int64 { - // arm64:"LSL",-"AND" - // loong64:"SLLV",-"AND" - // ppc64x:"RLDICL",-"ORN",-"ISEL" - // riscv64:"SLL",-"AND\t",-"SLTIU" - // s390x:-"RISBGZ",-"AND",-"LOCGR" + // arm64:"LSL" -"AND" + // loong64:"SLLV" -"AND" + // ppc64x:"RLDICL" -"ORN" -"ISEL" + // riscv64:"SLL" -"AND " -"SLTIU" + // s390x:-"RISBGZ" -"AND" -"LOCGR" return v << (s & 63) } func rshMask64Ux64(v uint64, s uint64) uint64 { - // arm64:"LSR",-"AND",-"CSEL" - // loong64:"SRLV",-"AND" - // ppc64x:"RLDICL",-"ORN",-"ISEL" - // riscv64:"SRL\t",-"AND\t",-"SLTIU" - // s390x:-"RISBGZ",-"AND",-"LOCGR" + // arm64:"LSR" -"AND" -"CSEL" + // loong64:"SRLV" -"AND" + // ppc64x:"RLDICL" -"ORN" -"ISEL" + // riscv64:"SRL " -"AND " -"SLTIU" + // s390x:-"RISBGZ" -"AND" -"LOCGR" return v >> (s & 63) } func rshMask64x64(v int64, s uint64) int64 { - // arm64:"ASR",-"AND",-"CSEL" - // loong64:"SRAV",-"AND" - // ppc64x:"RLDICL",-"ORN",-"ISEL" - // riscv64:"SRA\t",-"OR",-"SLTIU" - // s390x:-"RISBGZ",-"AND",-"LOCGR" + // arm64:"ASR" -"AND" -"CSEL" + // loong64:"SRAV" -"AND" + // ppc64x:"RLDICL" -"ORN" -"ISEL" + // riscv64:"SRA " -"OR" -"SLTIU" + // s390x:-"RISBGZ" -"AND" -"LOCGR" return v >> (s & 63) } func lshMask32x64(v int32, s uint64) int32 { - // arm64:"LSL",-"AND" - // loong64:"SLL\t","AND","SGTU","MASKEQZ" - // ppc64x:"ISEL",-"ORN" - // riscv64:"SLL",-"AND\t",-"SLTIU" - // s390x:-"RISBGZ",-"AND",-"LOCGR" + // arm64:"LSL" -"AND" + // loong64:"SLL " "AND" "SGTU" "MASKEQZ" + // ppc64x:"ISEL" -"ORN" + // riscv64:"SLL" -"AND " -"SLTIU" + // s390x:-"RISBGZ" -"AND" -"LOCGR" return v << (s & 63) } func lsh5Mask32x64(v int32, s uint64) int32 { - // loong64:"SLL\t",-"AND" + // loong64:"SLL " -"AND" return v << (s & 31) } func rshMask32Ux64(v 
uint32, s uint64) uint32 { - // arm64:"LSR",-"AND" - // loong64:"SRL\t","AND","SGTU","MASKEQZ" - // ppc64x:"ISEL",-"ORN" - // riscv64:"SRLW","SLTIU","NEG","AND\t",-"SRL\t" - // s390x:-"RISBGZ",-"AND",-"LOCGR" + // arm64:"LSR" -"AND" + // loong64:"SRL " "AND" "SGTU" "MASKEQZ" + // ppc64x:"ISEL" -"ORN" + // riscv64:"SRLW" "SLTIU" "NEG" "AND " -"SRL " + // s390x:-"RISBGZ" -"AND" -"LOCGR" return v >> (s & 63) } func rsh5Mask32Ux64(v uint32, s uint64) uint32 { - // loong64:"SRL\t",-"AND" - // riscv64:"SRLW",-"AND\t",-"SLTIU",-"SRL\t" + // loong64:"SRL " -"AND" + // riscv64:"SRLW" -"AND " -"SLTIU" -"SRL " return v >> (s & 31) } func rshMask32x64(v int32, s uint64) int32 { - // arm64:"ASR",-"AND" - // loong64:"SRA\t","AND","SGTU","SUBVU","OR" - // ppc64x:"ISEL",-"ORN" - // riscv64:"SRAW","OR","SLTIU" - // s390x:-"RISBGZ",-"AND",-"LOCGR" + // arm64:"ASR" -"AND" + // loong64:"SRA " "AND" "SGTU" "SUBVU" "OR" + // ppc64x:"ISEL" -"ORN" + // riscv64:"SRAW" "OR" "SLTIU" + // s390x:-"RISBGZ" -"AND" -"LOCGR" return v >> (s & 63) } func rsh5Mask32x64(v int32, s uint64) int32 { - // loong64:"SRA\t",-"AND" - // riscv64:"SRAW",-"OR",-"SLTIU" + // loong64:"SRA " -"AND" + // riscv64:"SRAW" -"OR" -"SLTIU" return v >> (s & 31) } func lshMask64x32(v int64, s uint32) int64 { - // arm64:"LSL",-"AND" - // loong64:"SLLV",-"AND" - // ppc64x:"RLDICL",-"ORN" - // riscv64:"SLL",-"AND\t",-"SLTIU" - // s390x:-"RISBGZ",-"AND",-"LOCGR" + // arm64:"LSL" -"AND" + // loong64:"SLLV" -"AND" + // ppc64x:"RLDICL" -"ORN" + // riscv64:"SLL" -"AND " -"SLTIU" + // s390x:-"RISBGZ" -"AND" -"LOCGR" return v << (s & 63) } func rshMask64Ux32(v uint64, s uint32) uint64 { - // arm64:"LSR",-"AND",-"CSEL" - // loong64:"SRLV",-"AND" - // ppc64x:"RLDICL",-"ORN" - // riscv64:"SRL\t",-"AND\t",-"SLTIU" - // s390x:-"RISBGZ",-"AND",-"LOCGR" + // arm64:"LSR" -"AND" -"CSEL" + // loong64:"SRLV" -"AND" + // ppc64x:"RLDICL" -"ORN" + // riscv64:"SRL " -"AND " -"SLTIU" + // s390x:-"RISBGZ" -"AND" -"LOCGR" return v >> (s & 63) } func rshMask64x32(v int64, s uint32) int64 { - // arm64:"ASR",-"AND",-"CSEL" - // loong64:"SRAV",-"AND" - // ppc64x:"RLDICL",-"ORN",-"ISEL" - // riscv64:"SRA\t",-"OR",-"SLTIU" - // s390x:-"RISBGZ",-"AND",-"LOCGR" + // arm64:"ASR" -"AND" -"CSEL" + // loong64:"SRAV" -"AND" + // ppc64x:"RLDICL" -"ORN" -"ISEL" + // riscv64:"SRA " -"OR" -"SLTIU" + // s390x:-"RISBGZ" -"AND" -"LOCGR" return v >> (s & 63) } func lshMask64x32Ext(v int64, s int32) int64 { - // ppc64x:"RLDICL",-"ORN",-"ISEL" - // riscv64:"SLL",-"AND\t",-"SLTIU" - // s390x:-"RISBGZ",-"AND",-"LOCGR" + // ppc64x:"RLDICL" -"ORN" -"ISEL" + // riscv64:"SLL" -"AND " -"SLTIU" + // s390x:-"RISBGZ" -"AND" -"LOCGR" return v << uint(s&63) } func rshMask64Ux32Ext(v uint64, s int32) uint64 { - // ppc64x:"RLDICL",-"ORN",-"ISEL" - // riscv64:"SRL\t",-"AND\t",-"SLTIU" - // s390x:-"RISBGZ",-"AND",-"LOCGR" + // ppc64x:"RLDICL" -"ORN" -"ISEL" + // riscv64:"SRL " -"AND " -"SLTIU" + // s390x:-"RISBGZ" -"AND" -"LOCGR" return v >> uint(s&63) } func rshMask64x32Ext(v int64, s int32) int64 { - // ppc64x:"RLDICL",-"ORN",-"ISEL" - // riscv64:"SRA\t",-"OR",-"SLTIU" - // s390x:-"RISBGZ",-"AND",-"LOCGR" + // ppc64x:"RLDICL" -"ORN" -"ISEL" + // riscv64:"SRA " -"OR" -"SLTIU" + // s390x:-"RISBGZ" -"AND" -"LOCGR" return v >> uint(s&63) } @@ -316,10 +316,10 @@ func lshSignedMasked(v8 int8, v16 int16, v32 int32, v64 int64, x int) { func lshGuarded64(v int64, s uint) int64 { if s < 64 { - // riscv64:"SLL",-"AND",-"SLTIU" - // s390x:-"RISBGZ",-"AND",-"LOCGR" - // wasm:-"Select",-".*LtU" - // arm64:"LSL",-"CSEL" + // 
riscv64:"SLL" -"AND" -"SLTIU" + // s390x:-"RISBGZ" -"AND" -"LOCGR" + // wasm:-"Select" -".*LtU" + // arm64:"LSL" -"CSEL" return v << s } panic("shift too large") @@ -327,10 +327,10 @@ func lshGuarded64(v int64, s uint) int64 { func rshGuarded64U(v uint64, s uint) uint64 { if s < 64 { - // riscv64:"SRL\t",-"AND",-"SLTIU" - // s390x:-"RISBGZ",-"AND",-"LOCGR" - // wasm:-"Select",-".*LtU" - // arm64:"LSR",-"CSEL" + // riscv64:"SRL " -"AND" -"SLTIU" + // s390x:-"RISBGZ" -"AND" -"LOCGR" + // wasm:-"Select" -".*LtU" + // arm64:"LSR" -"CSEL" return v >> s } panic("shift too large") @@ -338,10 +338,10 @@ func rshGuarded64U(v uint64, s uint) uint64 { func rshGuarded64(v int64, s uint) int64 { if s < 64 { - // riscv64:"SRA\t",-"OR",-"SLTIU" - // s390x:-"RISBGZ",-"AND",-"LOCGR" - // wasm:-"Select",-".*LtU" - // arm64:"ASR",-"CSEL" + // riscv64:"SRA " -"OR" -"SLTIU" + // s390x:-"RISBGZ" -"AND" -"LOCGR" + // wasm:-"Select" -".*LtU" + // arm64:"ASR" -"CSEL" return v >> s } panic("shift too large") @@ -349,19 +349,19 @@ func rshGuarded64(v int64, s uint) int64 { func provedUnsignedShiftLeft(val64 uint64, val32 uint32, val16 uint16, val8 uint8, shift int) (r1 uint64, r2 uint32, r3 uint16, r4 uint8) { if shift >= 0 && shift < 64 { - // arm64:"LSL",-"CSEL" + // arm64:"LSL" -"CSEL" r1 = val64 << shift } if shift >= 0 && shift < 32 { - // arm64:"LSL",-"CSEL" + // arm64:"LSL" -"CSEL" r2 = val32 << shift } if shift >= 0 && shift < 16 { - // arm64:"LSL",-"CSEL" + // arm64:"LSL" -"CSEL" r3 = val16 << shift } if shift >= 0 && shift < 8 { - // arm64:"LSL",-"CSEL" + // arm64:"LSL" -"CSEL" r4 = val8 << shift } return r1, r2, r3, r4 @@ -369,19 +369,19 @@ func provedUnsignedShiftLeft(val64 uint64, val32 uint32, val16 uint16, val8 uint func provedSignedShiftLeft(val64 int64, val32 int32, val16 int16, val8 int8, shift int) (r1 int64, r2 int32, r3 int16, r4 int8) { if shift >= 0 && shift < 64 { - // arm64:"LSL",-"CSEL" + // arm64:"LSL" -"CSEL" r1 = val64 << shift } if shift >= 0 && shift < 32 { - // arm64:"LSL",-"CSEL" + // arm64:"LSL" -"CSEL" r2 = val32 << shift } if shift >= 0 && shift < 16 { - // arm64:"LSL",-"CSEL" + // arm64:"LSL" -"CSEL" r3 = val16 << shift } if shift >= 0 && shift < 8 { - // arm64:"LSL",-"CSEL" + // arm64:"LSL" -"CSEL" r4 = val8 << shift } return r1, r2, r3, r4 @@ -389,19 +389,19 @@ func provedSignedShiftLeft(val64 int64, val32 int32, val16 int16, val8 int8, shi func provedUnsignedShiftRight(val64 uint64, val32 uint32, val16 uint16, val8 uint8, shift int) (r1 uint64, r2 uint32, r3 uint16, r4 uint8) { if shift >= 0 && shift < 64 { - // arm64:"LSR",-"CSEL" + // arm64:"LSR" -"CSEL" r1 = val64 >> shift } if shift >= 0 && shift < 32 { - // arm64:"LSR",-"CSEL" + // arm64:"LSR" -"CSEL" r2 = val32 >> shift } if shift >= 0 && shift < 16 { - // arm64:"LSR",-"CSEL" + // arm64:"LSR" -"CSEL" r3 = val16 >> shift } if shift >= 0 && shift < 8 { - // arm64:"LSR",-"CSEL" + // arm64:"LSR" -"CSEL" r4 = val8 >> shift } return r1, r2, r3, r4 @@ -409,19 +409,19 @@ func provedUnsignedShiftRight(val64 uint64, val32 uint32, val16 uint16, val8 uin func provedSignedShiftRight(val64 int64, val32 int32, val16 int16, val8 int8, shift int) (r1 int64, r2 int32, r3 int16, r4 int8) { if shift >= 0 && shift < 64 { - // arm64:"ASR",-"CSEL" + // arm64:"ASR" -"CSEL" r1 = val64 >> shift } if shift >= 0 && shift < 32 { - // arm64:"ASR",-"CSEL" + // arm64:"ASR" -"CSEL" r2 = val32 >> shift } if shift >= 0 && shift < 16 { - // arm64:"ASR",-"CSEL" + // arm64:"ASR" -"CSEL" r3 = val16 >> shift } if shift >= 0 && shift < 8 { - // 
arm64:"ASR",-"CSEL" + // arm64:"ASR" -"CSEL" r4 = val8 >> shift } return r1, r2, r3, r4 @@ -429,36 +429,36 @@ func provedSignedShiftRight(val64 int64, val32 int32, val16 int16, val8 int8, sh func checkUnneededTrunc(tab *[100000]uint32, d uint64, v uint32, h uint16, b byte) (uint32, uint64) { - // ppc64x:-".*RLWINM",-".*RLDICR",".*CLRLSLDI" + // ppc64x:-".*RLWINM" -".*RLDICR" ".*CLRLSLDI" f := tab[byte(v)^b] - // ppc64x:-".*RLWINM",-".*RLDICR",".*CLRLSLDI" + // ppc64x:-".*RLWINM" -".*RLDICR" ".*CLRLSLDI" f += tab[byte(v)&b] - // ppc64x:-".*RLWINM",-".*RLDICR",".*CLRLSLDI" + // ppc64x:-".*RLWINM" -".*RLDICR" ".*CLRLSLDI" f += tab[byte(v)|b] - // ppc64x:-".*RLWINM",-".*RLDICR",".*CLRLSLDI" + // ppc64x:-".*RLWINM" -".*RLDICR" ".*CLRLSLDI" f += tab[uint16(v)&h] - // ppc64x:-".*RLWINM",-".*RLDICR",".*CLRLSLDI" + // ppc64x:-".*RLWINM" -".*RLDICR" ".*CLRLSLDI" f += tab[uint16(v)^h] - // ppc64x:-".*RLWINM",-".*RLDICR",".*CLRLSLDI" + // ppc64x:-".*RLWINM" -".*RLDICR" ".*CLRLSLDI" f += tab[uint16(v)|h] - // ppc64x:-".*AND",-"RLDICR",".*CLRLSLDI" + // ppc64x:-".*AND" -"RLDICR" ".*CLRLSLDI" f += tab[v&0xff] - // ppc64x:-".*AND",".*CLRLSLWI" + // ppc64x:-".*AND" ".*CLRLSLWI" f += 2 * uint32(uint16(d)) - // ppc64x:-".*AND",-"RLDICR",".*CLRLSLDI" + // ppc64x:-".*AND" -"RLDICR" ".*CLRLSLDI" g := 2 * uint64(uint32(d)) return f, g } func checkCombinedShifts(v8 uint8, v16 uint16, v32 uint32, x32 int32, v64 uint64) (uint8, uint16, uint32, uint64, int64) { - // ppc64x:-"AND","CLRLSLWI" + // ppc64x:-"AND" "CLRLSLWI" f := (v8 & 0xF) << 2 // ppc64x:"CLRLSLWI" f += byte(v16) << 3 - // ppc64x:-"AND","CLRLSLWI" + // ppc64x:-"AND" "CLRLSLWI" g := (v16 & 0xFF) << 3 - // ppc64x:-"AND","CLRLSLWI" + // ppc64x:-"AND" "CLRLSLWI" h := (v32 & 0xFFFFF) << 2 // ppc64x:"CLRLSLDI" i := (v64 & 0xFFFFFFFF) << 5 @@ -466,7 +466,7 @@ func checkCombinedShifts(v8 uint8, v16 uint16, v32 uint32, x32 int32, v64 uint64 i += (v64 & 0xFFFFFFF) << 38 // ppc64x/power9:-"CLRLSLDI" i += (v64 & 0xFFFF00) << 10 - // ppc64x/power9:-"SLD","EXTSWSLI" + // ppc64x/power9:-"SLD" "EXTSWSLI" j := int64(x32+32) * 8 return f, g, h, i, j } @@ -497,71 +497,71 @@ func checkWidenAfterShift(v int64, u uint64) (int64, uint64) { func checkShiftAndMask32(v []uint32) { i := 0 - // ppc64x: "RLWNM\t[$]24, R[0-9]+, [$]12, [$]19, R[0-9]+" + // ppc64x: "RLWNM [$]24, R[0-9]+, [$]12, [$]19, R[0-9]+" v[i] = (v[i] & 0xFF00000) >> 8 i++ - // ppc64x: "RLWNM\t[$]26, R[0-9]+, [$]22, [$]29, R[0-9]+" + // ppc64x: "RLWNM [$]26, R[0-9]+, [$]22, [$]29, R[0-9]+" v[i] = (v[i] & 0xFF00) >> 6 i++ - // ppc64x: "MOVW\tR0" + // ppc64x: "MOVW R0" v[i] = (v[i] & 0xFF) >> 8 i++ - // ppc64x: "MOVW\tR0" + // ppc64x: "MOVW R0" v[i] = (v[i] & 0xF000000) >> 28 i++ - // ppc64x: "RLWNM\t[$]26, R[0-9]+, [$]24, [$]31, R[0-9]+" + // ppc64x: "RLWNM [$]26, R[0-9]+, [$]24, [$]31, R[0-9]+" v[i] = (v[i] >> 6) & 0xFF i++ - // ppc64x: "RLWNM\t[$]26, R[0-9]+, [$]12, [$]19, R[0-9]+" + // ppc64x: "RLWNM [$]26, R[0-9]+, [$]12, [$]19, R[0-9]+" v[i] = (v[i] >> 6) & 0xFF000 i++ - // ppc64x: "MOVW\tR0" + // ppc64x: "MOVW R0" v[i] = (v[i] >> 20) & 0xFF000 i++ - // ppc64x: "MOVW\tR0" + // ppc64x: "MOVW R0" v[i] = (v[i] >> 24) & 0xFF00 i++ } func checkMergedShifts32(a [256]uint32, b [256]uint64, u uint32, v uint32) { - // ppc64x: -"CLRLSLDI", "RLWNM\t[$]10, R[0-9]+, [$]22, [$]29, R[0-9]+" + // ppc64x: -"CLRLSLDI", "RLWNM [$]10, R[0-9]+, [$]22, [$]29, R[0-9]+" a[0] = a[uint8(v>>24)] - // ppc64x: -"CLRLSLDI", "RLWNM\t[$]11, R[0-9]+, [$]21, [$]28, R[0-9]+" + // ppc64x: -"CLRLSLDI", "RLWNM [$]11, R[0-9]+, [$]21, [$]28, 
R[0-9]+" b[0] = b[uint8(v>>24)] - // ppc64x: -"CLRLSLDI", "RLWNM\t[$]15, R[0-9]+, [$]21, [$]28, R[0-9]+" + // ppc64x: -"CLRLSLDI", "RLWNM [$]15, R[0-9]+, [$]21, [$]28, R[0-9]+" b[1] = b[(v>>20)&0xFF] - // ppc64x: -"SLD", "RLWNM\t[$]10, R[0-9]+, [$]22, [$]28, R[0-9]+" + // ppc64x: -"SLD", "RLWNM [$]10, R[0-9]+, [$]22, [$]28, R[0-9]+" b[2] = b[v>>25] } func checkMergedShifts64(a [256]uint32, b [256]uint64, c [256]byte, v uint64) { - // ppc64x: -"CLRLSLDI", "RLWNM\t[$]10, R[0-9]+, [$]22, [$]29, R[0-9]+" + // ppc64x: -"CLRLSLDI", "RLWNM [$]10, R[0-9]+, [$]22, [$]29, R[0-9]+" a[0] = a[uint8(v>>24)] // ppc64x: "SRD", "CLRLSLDI", -"RLWNM" a[1] = a[uint8(v>>25)] - // ppc64x: -"CLRLSLDI", "RLWNM\t[$]9, R[0-9]+, [$]23, [$]29, R[0-9]+" + // ppc64x: -"CLRLSLDI", "RLWNM [$]9, R[0-9]+, [$]23, [$]29, R[0-9]+" a[2] = a[v>>25&0x7F] - // ppc64x: -"CLRLSLDI", "RLWNM\t[$]3, R[0-9]+, [$]29, [$]29, R[0-9]+" + // ppc64x: -"CLRLSLDI", "RLWNM [$]3, R[0-9]+, [$]29, [$]29, R[0-9]+" a[3] = a[(v>>31)&0x01] - // ppc64x: -"CLRLSLDI", "RLWNM\t[$]12, R[0-9]+, [$]21, [$]28, R[0-9]+" + // ppc64x: -"CLRLSLDI", "RLWNM [$]12, R[0-9]+, [$]21, [$]28, R[0-9]+" b[0] = b[uint8(v>>23)] - // ppc64x: -"CLRLSLDI", "RLWNM\t[$]15, R[0-9]+, [$]21, [$]28, R[0-9]+" + // ppc64x: -"CLRLSLDI", "RLWNM [$]15, R[0-9]+, [$]21, [$]28, R[0-9]+" b[1] = b[(v>>20)&0xFF] // ppc64x: "RLWNM", -"SLD" b[2] = b[((uint64((uint32(v) >> 21)) & 0x3f) << 4)] // ppc64x: -"RLWNM" b[3] = (b[3] << 24) & 0xFFFFFF000000 - // ppc64x: "RLWNM\t[$]24, R[0-9]+, [$]0, [$]7," + // ppc64x: "RLWNM [$]24, R[0-9]+, [$]0, [$]7," b[4] = (b[4] << 24) & 0xFF000000 - // ppc64x: "RLWNM\t[$]24, R[0-9]+, [$]0, [$]7," + // ppc64x: "RLWNM [$]24, R[0-9]+, [$]0, [$]7," b[5] = (b[5] << 24) & 0xFF00000F // ppc64x: -"RLWNM" b[6] = (b[6] << 0) & 0xFF00000F - // ppc64x: "RLWNM\t[$]4, R[0-9]+, [$]28, [$]31," + // ppc64x: "RLWNM [$]4, R[0-9]+, [$]28, [$]31," b[7] = (b[7] >> 28) & 0xF - // ppc64x: "RLWNM\t[$]11, R[0-9]+, [$]10, [$]15" + // ppc64x: "RLWNM [$]11, R[0-9]+, [$]10, [$]15" c[0] = c[((v>>5)&0x3F)<<16] - // ppc64x: "ANDCC\t[$]8064," + // ppc64x: "ANDCC [$]8064," c[1] = c[((v>>7)&0x3F)<<7] } @@ -572,7 +572,7 @@ func checkShiftMask(a uint32, b uint64, z []uint32, y []uint64) { z[0] = uint32(uint8(a >> 5)) // ppc64x: -"MOVBZ", -"SRW", "RLWNM" z[1] = uint32(uint8((a >> 4) & 0x7e)) - // ppc64x: "RLWNM\t[$]25, R[0-9]+, [$]27, [$]29, R[0-9]+" + // ppc64x: "RLWNM [$]25, R[0-9]+, [$]27, [$]29, R[0-9]+" z[2] = uint32(uint8(a>>7)) & 0x1c // ppc64x: -"MOVWZ" y[0] = uint64((a >> 6) & 0x1c) @@ -598,11 +598,11 @@ func check128bitShifts(x, y uint64, bits uint) (uint64, uint64) { } func checkShiftToMask(u []uint64, s []int64) { - // amd64:-"SHR",-"SHL","ANDQ" + // amd64:-"SHR" -"SHL" "ANDQ" u[0] = u[0] >> 5 << 5 - // amd64:-"SAR",-"SHL","ANDQ" + // amd64:-"SAR" -"SHL" "ANDQ" s[0] = s[0] >> 5 << 5 - // amd64:-"SHR",-"SHL","ANDQ" + // amd64:-"SHR" -"SHL" "ANDQ" u[1] = u[1] << 5 >> 5 } @@ -611,13 +611,13 @@ func checkShiftToMask(u []uint64, s []int64) { // func checkLeftShiftWithAddition(a int64, b int64) int64 { - // riscv64/rva20u64: "SLLI","ADD" + // riscv64/rva20u64: "SLLI" "ADD" // riscv64/rva22u64,riscv64/rva23u64: "SH1ADD" a = a + b<<1 - // riscv64/rva20u64: "SLLI","ADD" + // riscv64/rva20u64: "SLLI" "ADD" // riscv64/rva22u64,riscv64/rva23u64: "SH2ADD" a = a + b<<2 - // riscv64/rva20u64: "SLLI","ADD" + // riscv64/rva20u64: "SLLI" "ADD" // riscv64/rva22u64,riscv64/rva23u64: "SH3ADD" a = a + b<<3 return a @@ -631,7 +631,7 @@ func rsh64Uto32U(v uint64) uint32 { x := uint32(v) // riscv64:"MOVWU" if x > 8 { 
- // riscv64:"SRLIW",-"MOVWU",-"SLLI" + // riscv64:"SRLIW" -"MOVWU" -"SLLI" x >>= 2 } return x @@ -641,7 +641,7 @@ func rsh64Uto16U(v uint64) uint16 { x := uint16(v) // riscv64:"MOVHU" if x > 8 { - // riscv64:"SLLI","SRLI" + // riscv64:"SLLI" "SRLI" x >>= 2 } return x @@ -651,7 +651,7 @@ func rsh64Uto8U(v uint64) uint8 { x := uint8(v) // riscv64:"MOVBU" if x > 8 { - // riscv64:"SLLI","SRLI" + // riscv64:"SLLI" "SRLI" x >>= 2 } return x @@ -661,7 +661,7 @@ func rsh64to32(v int64) int32 { x := int32(v) // riscv64:"MOVW" if x > 8 { - // riscv64:"SRAIW",-"MOVW",-"SLLI" + // riscv64:"SRAIW" -"MOVW" -"SLLI" x >>= 2 } return x @@ -671,7 +671,7 @@ func rsh64to16(v int64) int16 { x := int16(v) // riscv64:"MOVH" if x > 8 { - // riscv64:"SLLI","SRAI" + // riscv64:"SLLI" "SRAI" x >>= 2 } return x @@ -681,7 +681,7 @@ func rsh64to8(v int64) int8 { x := int8(v) // riscv64:"MOVB" if x > 8 { - // riscv64:"SLLI","SRAI" + // riscv64:"SLLI" "SRAI" x >>= 2 } return x @@ -692,6 +692,6 @@ func rsh64to8(v int64) int8 { // (There is still a negative shift test, but // no shift-too-big test.) func signedModShift(i int) int64 { - // arm64:-"CMP",-"CSEL" + // arm64:-"CMP" -"CSEL" return 1 << (i % 64) } diff --git a/test/codegen/slices.go b/test/codegen/slices.go index 1d918a3a0a6..1c48b38047e 100644 --- a/test/codegen/slices.go +++ b/test/codegen/slices.go @@ -46,7 +46,7 @@ func SliceExtensionConst(s []int) []int { // amd64:-`.*runtime\.memclrNoHeapPointers` // amd64:-`.*runtime\.makeslice` // amd64:-`.*runtime\.panicmakeslicelen` - // amd64:"MOVUPS\tX15" + // amd64:"MOVUPS X15" // loong64:-`.*runtime\.memclrNoHeapPointers` // ppc64x:-`.*runtime\.memclrNoHeapPointers` // ppc64x:-`.*runtime\.makeslice` @@ -58,7 +58,7 @@ func SliceExtensionConstInt64(s []int) []int { // amd64:-`.*runtime\.memclrNoHeapPointers` // amd64:-`.*runtime\.makeslice` // amd64:-`.*runtime\.panicmakeslicelen` - // amd64:"MOVUPS\tX15" + // amd64:"MOVUPS X15" // loong64:-`.*runtime\.memclrNoHeapPointers` // ppc64x:-`.*runtime\.memclrNoHeapPointers` // ppc64x:-`.*runtime\.makeslice` @@ -70,7 +70,7 @@ func SliceExtensionConstUint64(s []int) []int { // amd64:-`.*runtime\.memclrNoHeapPointers` // amd64:-`.*runtime\.makeslice` // amd64:-`.*runtime\.panicmakeslicelen` - // amd64:"MOVUPS\tX15" + // amd64:"MOVUPS X15" // loong64:-`.*runtime\.memclrNoHeapPointers` // ppc64x:-`.*runtime\.memclrNoHeapPointers` // ppc64x:-`.*runtime\.makeslice` @@ -82,7 +82,7 @@ func SliceExtensionConstUint(s []int) []int { // amd64:-`.*runtime\.memclrNoHeapPointers` // amd64:-`.*runtime\.makeslice` // amd64:-`.*runtime\.panicmakeslicelen` - // amd64:"MOVUPS\tX15" + // amd64:"MOVUPS X15" // loong64:-`.*runtime\.memclrNoHeapPointers` // ppc64x:-`.*runtime\.memclrNoHeapPointers` // ppc64x:-`.*runtime\.makeslice` @@ -352,12 +352,12 @@ func SliceNilCheck(s []int) { // ---------------------- // // See issue 21561 func InitSmallSliceLiteral() []int { - // amd64:`MOVQ\t[$]42` + // amd64:`MOVQ [$]42` return []int{42} } func InitNotSmallSliceLiteral() []int { - // amd64:`LEAQ\t.*stmp_` + // amd64:`LEAQ .*stmp_` return []int{ 42, 42, @@ -413,7 +413,7 @@ func SliceWithConstCompare(a []int, b int) []int { } func SliceWithSubtractBound(a []int, b int) []int { - // ppc64x:"SUBC",-"NEG" + // ppc64x:"SUBC" -"NEG" return a[(3 - b):] } @@ -422,7 +422,7 @@ func SliceWithSubtractBound(a []int, b int) []int { // --------------------------------------- // func SliceAndIndex(a []int, b int) int { - // arm64:"AND\tR[0-9]+->63","ADD\tR[0-9]+<<3" + // arm64:"AND R[0-9]+->63" "ADD R[0-9]+<<3" return 
a[b:][b] } @@ -445,12 +445,12 @@ func Slice0(p *struct{}, i int) []struct{} { // --------------------------------------- // func SlicePut(a []byte, c uint8) []byte { - // arm64:`CBZ\tR1` + // arm64:`CBZ R1` a[0] = c - // arm64:`CMP\t\$1, R1` + // arm64:`CMP \$1, R1` a = a[1:] a[0] = c - // arm64:`CMP\t\$2, R1` + // arm64:`CMP \$2, R1` a = a[1:] a[0] = c a = a[1:] diff --git a/test/codegen/smallintiface.go b/test/codegen/smallintiface.go index 0207a0af79f..e1dbae5f9ab 100644 --- a/test/codegen/smallintiface.go +++ b/test/codegen/smallintiface.go @@ -7,16 +7,16 @@ package codegen // license that can be found in the LICENSE file. func booliface() interface{} { - // amd64:`LEAQ\truntime.staticuint64s\+8\(SB\)` + // amd64:`LEAQ runtime.staticuint64s\+8\(SB\)` return true } func smallint8iface() interface{} { - // amd64:`LEAQ\truntime.staticuint64s\+2024\(SB\)` + // amd64:`LEAQ runtime.staticuint64s\+2024\(SB\)` return int8(-3) } func smalluint8iface() interface{} { - // amd64:`LEAQ\truntime.staticuint64s\+24\(SB\)` + // amd64:`LEAQ runtime.staticuint64s\+24\(SB\)` return uint8(3) } diff --git a/test/codegen/stack.go b/test/codegen/stack.go index 59284ae8886..4ce56eb1d97 100644 --- a/test/codegen/stack.go +++ b/test/codegen/stack.go @@ -16,13 +16,13 @@ import ( // Check that stack stores are optimized away. -// 386:"TEXT\t.*, [$]0-" -// amd64:"TEXT\t.*, [$]0-" -// arm:"TEXT\t.*, [$]-4-" -// arm64:"TEXT\t.*, [$]0-" -// mips:"TEXT\t.*, [$]-4-" -// ppc64x:"TEXT\t.*, [$]0-" -// s390x:"TEXT\t.*, [$]0-" +// 386:"TEXT .*, [$]0-" +// amd64:"TEXT .*, [$]0-" +// arm:"TEXT .*, [$]-4-" +// arm64:"TEXT .*, [$]0-" +// mips:"TEXT .*, [$]-4-" +// ppc64x:"TEXT .*, [$]0-" +// s390x:"TEXT .*, [$]0-" func StackStore() int { var x int return *(&x) @@ -35,13 +35,13 @@ type T struct { // Check that large structs are cleared directly (issue #24416). -// 386:"TEXT\t.*, [$]0-" -// amd64:"TEXT\t.*, [$]0-" -// arm:"TEXT\t.*, [$]0-" (spills return address) -// arm64:"TEXT\t.*, [$]0-" -// mips:"TEXT\t.*, [$]-4-" -// ppc64x:"TEXT\t.*, [$]0-" -// s390x:"TEXT\t.*, [$]0-" +// 386:"TEXT .*, [$]0-" +// amd64:"TEXT .*, [$]0-" +// arm:"TEXT .*, [$]0-" (spills return address) +// arm64:"TEXT .*, [$]0-" +// mips:"TEXT .*, [$]-4-" +// ppc64x:"TEXT .*, [$]0-" +// s390x:"TEXT .*, [$]0-" func ZeroLargeStruct(x *T) { t := T{} *x = t @@ -51,11 +51,11 @@ func ZeroLargeStruct(x *T) { // Notes: // - 386 fails due to spilling a register -// amd64:"TEXT\t.*, [$]0-" -// arm:"TEXT\t.*, [$]0-" (spills return address) -// arm64:"TEXT\t.*, [$]0-" -// ppc64x:"TEXT\t.*, [$]0-" -// s390x:"TEXT\t.*, [$]0-" +// amd64:"TEXT .*, [$]0-" +// arm:"TEXT .*, [$]0-" (spills return address) +// arm64:"TEXT .*, [$]0-" +// ppc64x:"TEXT .*, [$]0-" +// s390x:"TEXT .*, [$]0-" // Note: that 386 currently has to spill a register. func KeepWanted(t *T) { *t = T{A: t.A, B: t.B, C: t.C, D: t.D} @@ -66,23 +66,23 @@ func KeepWanted(t *T) { // Notes: // - 386 fails due to spilling a register // - arm & mips fail due to softfloat calls -// amd64:"TEXT\t.*, [$]0-" -// arm64:"TEXT\t.*, [$]0-" -// ppc64x:"TEXT\t.*, [$]0-" -// s390x:"TEXT\t.*, [$]0-" +// amd64:"TEXT .*, [$]0-" +// arm64:"TEXT .*, [$]0-" +// ppc64x:"TEXT .*, [$]0-" +// s390x:"TEXT .*, [$]0-" func ArrayAdd64(a, b [4]float64) [4]float64 { return [4]float64{a[0] + b[0], a[1] + b[1], a[2] + b[2], a[3] + b[3]} } // Check that small array initialization avoids using the stack. 
-// 386:"TEXT\t.*, [$]0-" -// amd64:"TEXT\t.*, [$]0-" -// arm:"TEXT\t.*, [$]0-" (spills return address) -// arm64:"TEXT\t.*, [$]0-" -// mips:"TEXT\t.*, [$]-4-" -// ppc64x:"TEXT\t.*, [$]0-" -// s390x:"TEXT\t.*, [$]0-" +// 386:"TEXT .*, [$]0-" +// amd64:"TEXT .*, [$]0-" +// arm:"TEXT .*, [$]0-" (spills return address) +// arm64:"TEXT .*, [$]0-" +// mips:"TEXT .*, [$]-4-" +// ppc64x:"TEXT .*, [$]0-" +// s390x:"TEXT .*, [$]0-" func ArrayInit(i, j int) [4]int { return [4]int{i, 0, j, 0} } @@ -99,7 +99,7 @@ func check_asmout(b [2]int) int { // Check that simple functions get promoted to nosplit, even when // they might panic in various ways. See issue 31219. -// amd64:"TEXT\t.*NOSPLIT.*" +// amd64:"TEXT .*NOSPLIT.*" func MightPanic(a []int, i, j, k, s int) { _ = a[i] // panicIndex _ = a[i:j] // panicSlice @@ -113,7 +113,7 @@ func Defer() { for i := 0; i < 2; i++ { defer func() {}() } - // amd64:`CALL\truntime\.deferprocStack` + // amd64:`CALL runtime\.deferprocStack` defer func() {}() } @@ -138,7 +138,7 @@ type mySlice struct { cap int } -// amd64:"TEXT\t.*, [$]0-" +// amd64:"TEXT .*, [$]0-" func sliceInit(base uintptr) []uintptr { const ptrSize = 8 size := uintptr(4096) @@ -150,7 +150,7 @@ func sliceInit(base uintptr) []uintptr { elements, elements, } - // amd64:-"POPQ",-"SP" + // amd64:-"POPQ" -"SP" return *(*[]uintptr)(unsafe.Pointer(&sl)) } @@ -171,6 +171,6 @@ func getp2() *[4]int { // Store to an argument without read can be removed. func storeArg(a [2]int) { - // amd64:-`MOVQ\t\$123,.*\.a\+\d+\(SP\)` + // amd64:-`MOVQ \$123,.*\.a\+\d+\(SP\)` a[1] = 123 } diff --git a/test/codegen/strings.go b/test/codegen/strings.go index 498c3d398f8..9d85604395b 100644 --- a/test/codegen/strings.go +++ b/test/codegen/strings.go @@ -22,8 +22,8 @@ func CountBytes(s []byte) int { } func ToByteSlice() []byte { // Issue #24698 - // amd64:`LEAQ\ttype:\[3\]uint8` - // amd64:`CALL\truntime\.newobject` + // amd64:`LEAQ type:\[3\]uint8` + // amd64:`CALL runtime\.mallocTiny3` // amd64:-`.*runtime.stringtoslicebyte` return []byte("foo") } @@ -37,56 +37,56 @@ func ConvertToByteSlice(a, b, c string) []byte { func ConstantLoad() { // 12592 = 0x3130 // 50 = 0x32 - // amd64:`MOVW\t\$12592, \(`,`MOVB\t\$50, 2\(` - // 386:`MOVW\t\$12592, \(`,`MOVB\t\$50, 2\(` - // arm:`MOVW\t\$48`,`MOVW\t\$49`,`MOVW\t\$50` - // arm64:`MOVD\t\$12592`,`MOVD\t\$50` - // loong64:`MOVV\t\$12592`,`MOVV\t\$50` - // wasm:`I64Const\t\$12592`,`I64Store16\t\$0`,`I64Const\t\$50`,`I64Store8\t\$2` - // mips64:`MOVV\t\$48`,`MOVV\t\$49`,`MOVV\t\$50` + // amd64:`MOVW \$12592, \(`,`MOVB \$50, 2\(` + // 386:`MOVW \$12592, \(`,`MOVB \$50, 2\(` + // arm:`MOVW \$48`,`MOVW \$49`,`MOVW \$50` + // arm64:`MOVD \$12592`,`MOVD \$50` + // loong64:`MOVV \$12592`,`MOVV \$50` + // wasm:`I64Const \$12592`,`I64Store16 \$0`,`I64Const \$50`,`I64Store8 \$2` + // mips64:`MOVV \$48`,`MOVV \$49`,`MOVV \$50` bsink = []byte("012") // 858927408 = 0x33323130 // 13620 = 0x3534 - // amd64:`MOVL\t\$858927408`,`MOVW\t\$13620, 4\(` - // 386:`MOVL\t\$858927408`,`MOVW\t\$13620, 4\(` - // arm64:`MOVD\t\$858927408`,`MOVD\t\$13620` - // loong64:`MOVV\t\$858927408`,`MOVV\t\$13620` - // wasm:`I64Const\t\$858927408`,`I64Store32\t\$0`,`I64Const\t\$13620`,`I64Store16\t\$4` + // amd64:`MOVL \$858927408`,`MOVW \$13620, 4\(` + // 386:`MOVL \$858927408`,`MOVW \$13620, 4\(` + // arm64:`MOVD \$858927408`,`MOVD \$13620` + // loong64:`MOVV \$858927408`,`MOVV \$13620` + // wasm:`I64Const \$858927408`,`I64Store32 \$0`,`I64Const \$13620`,`I64Store16 \$4` bsink = []byte("012345") // 3978425819141910832 = 
0x3736353433323130 // 7306073769690871863 = 0x6564636261393837 - // amd64:`MOVQ\t\$3978425819141910832`,`MOVQ\t\$7306073769690871863` - // 386:`MOVL\t\$858927408, \(`,`DUFFCOPY` - // arm64:`MOVD\t\$3978425819141910832`,`MOVD\t\$7306073769690871863`,`MOVD\t\$15` - // loong64:`MOVV\t\$3978425819141910832`,`MOVV\t\$7306073769690871863`,`MOVV\t\$15` - // wasm:`I64Const\t\$3978425819141910832`,`I64Store\t\$0`,`I64Const\t\$7306073769690871863`,`I64Store\t\$7` + // amd64:`MOVQ \$3978425819141910832`,`MOVQ \$7306073769690871863` + // 386:`MOVL \$858927408, \(`,`DUFFCOPY` + // arm64:`MOVD \$3978425819141910832`,`MOVD \$7306073769690871863`,`MOVD \$15` + // loong64:`MOVV \$3978425819141910832`,`MOVV \$7306073769690871863`,`MOVV \$15` + // wasm:`I64Const \$3978425819141910832`,`I64Store \$0`,`I64Const \$7306073769690871863`,`I64Store \$7` bsink = []byte("0123456789abcde") // 56 = 0x38 - // amd64:`MOVQ\t\$3978425819141910832`,`MOVB\t\$56` - // loong64:`MOVV\t\$3978425819141910832`,`MOVV\t\$56` + // amd64:`MOVQ \$3978425819141910832`,`MOVB \$56` + // loong64:`MOVV \$3978425819141910832`,`MOVV \$56` bsink = []byte("012345678") // 14648 = 0x3938 - // amd64:`MOVQ\t\$3978425819141910832`,`MOVW\t\$14648` - // loong64:`MOVV\t\$3978425819141910832`,`MOVV\t\$14648` + // amd64:`MOVQ \$3978425819141910832`,`MOVW \$14648` + // loong64:`MOVV \$3978425819141910832`,`MOVV \$14648` bsink = []byte("0123456789") // 1650538808 = 0x62613938 - // amd64:`MOVQ\t\$3978425819141910832`,`MOVL\t\$1650538808` - // loong64:`MOVV\t\$3978425819141910832`,`MOVV\t\$1650538808` + // amd64:`MOVQ \$3978425819141910832`,`MOVL \$1650538808` + // loong64:`MOVV \$3978425819141910832`,`MOVV \$1650538808` bsink = []byte("0123456789ab") } // self-equality is always true. See issue 60777. func EqualSelf(s string) bool { - // amd64:`MOVL\t\$1, AX`,-`.*memequal.*` + // amd64:`MOVL \$1, AX`,-`.*memequal.*` return s == s } func NotEqualSelf(s string) bool { - // amd64:`XORL\tAX, AX`,-`.*memequal.*` + // amd64:`XORL AX, AX`,-`.*memequal.*` return s != s } diff --git a/test/codegen/structs.go b/test/codegen/structs.go index 49a201f7432..1c8cb5d62d5 100644 --- a/test/codegen/structs.go +++ b/test/codegen/structs.go @@ -20,7 +20,7 @@ type Z1 struct { } func Zero1(t *Z1) { // Issue #18370 - // amd64:`MOVUPS\tX[0-9]+, \(.*\)`,`MOVQ\t\$0, 16\(.*\)` + // amd64:`MOVUPS X[0-9]+, \(.*\)`,`MOVQ \$0, 16\(.*\)` *t = Z1{} } @@ -29,7 +29,7 @@ type Z2 struct { } func Zero2(t *Z2) { - // amd64:`MOVUPS\tX[0-9]+, \(.*\)`,`MOVQ\t\$0, 16\(.*\)` + // amd64:`MOVUPS X[0-9]+, \(.*\)`,`MOVQ \$0, 16\(.*\)` // amd64:`.*runtime[.]gcWriteBarrier.*\(SB\)` *t = Z2{} } @@ -43,6 +43,6 @@ type I1 struct { } func Init1(p *I1) { // Issue #18872 - // amd64:`MOVQ\t[$]1`,`MOVQ\t[$]2`,`MOVQ\t[$]3`,`MOVQ\t[$]4` + // amd64:`MOVQ [$]1`,`MOVQ [$]2`,`MOVQ [$]3`,`MOVQ [$]4` *p = I1{1, 2, 3, 4} } diff --git a/test/codegen/switch.go b/test/codegen/switch.go index 1a66a5ddf84..a8860d0cc49 100644 --- a/test/codegen/switch.go +++ b/test/codegen/switch.go @@ -137,8 +137,8 @@ type K interface { // use a runtime call for type switches to interface types. 
func interfaceSwitch(x any) int { - // amd64:`CALL\truntime.interfaceSwitch`,`MOVL\t16\(AX\)`,`MOVQ\t8\(.*\)(.*\*8)` - // arm64:`CALL\truntime.interfaceSwitch`,`LDAR`,`MOVWU\t16\(R0\)`,`MOVD\t\(R.*\)\(R.*\)` + // amd64:`CALL runtime.interfaceSwitch`,`MOVL 16\(AX\)`,`MOVQ 8\(.*\)(.*\*8)` + // arm64:`CALL runtime.interfaceSwitch`,`LDAR`,`MOVWU 16\(R0\)`,`MOVD \(R.*\)\(R.*\)` switch x.(type) { case I: return 1 @@ -150,8 +150,8 @@ func interfaceSwitch(x any) int { } func interfaceSwitch2(x K) int { - // amd64:`CALL\truntime.interfaceSwitch`,`MOVL\t16\(AX\)`,`MOVQ\t8\(.*\)(.*\*8)` - // arm64:`CALL\truntime.interfaceSwitch`,`LDAR`,`MOVWU\t16\(R0\)`,`MOVD\t\(R.*\)\(R.*\)` + // amd64:`CALL runtime.interfaceSwitch`,`MOVL 16\(AX\)`,`MOVQ 8\(.*\)(.*\*8)` + // arm64:`CALL runtime.interfaceSwitch`,`LDAR`,`MOVWU 16\(R0\)`,`MOVD \(R.*\)\(R.*\)` switch x.(type) { case I: return 1 @@ -163,8 +163,8 @@ func interfaceSwitch2(x K) int { } func interfaceCast(x any) int { - // amd64:`CALL\truntime.typeAssert`,`MOVL\t16\(AX\)`,`MOVQ\t8\(.*\)(.*\*1)` - // arm64:`CALL\truntime.typeAssert`,`LDAR`,`MOVWU\t16\(R0\)`,`MOVD\t\(R.*\)\(R.*\)` + // amd64:`CALL runtime.typeAssert`,`MOVL 16\(AX\)`,`MOVQ 8\(.*\)(.*\*1)` + // arm64:`CALL runtime.typeAssert`,`LDAR`,`MOVWU 16\(R0\)`,`MOVD \(R.*\)\(R.*\)` if _, ok := x.(I); ok { return 3 } @@ -172,8 +172,8 @@ func interfaceCast(x any) int { } func interfaceCast2(x K) int { - // amd64:`CALL\truntime.typeAssert`,`MOVL\t16\(AX\)`,`MOVQ\t8\(.*\)(.*\*1)` - // arm64:`CALL\truntime.typeAssert`,`LDAR`,`MOVWU\t16\(R0\)`,`MOVD\t\(R.*\)\(R.*\)` + // amd64:`CALL runtime.typeAssert`,`MOVL 16\(AX\)`,`MOVQ 8\(.*\)(.*\*1)` + // arm64:`CALL runtime.typeAssert`,`LDAR`,`MOVWU 16\(R0\)`,`MOVD \(R.*\)\(R.*\)` if _, ok := x.(I); ok { return 3 } @@ -181,8 +181,8 @@ func interfaceCast2(x K) int { } func interfaceConv(x IJ) I { - // amd64:`CALL\truntime.typeAssert`,`MOVL\t16\(AX\)`,`MOVQ\t8\(.*\)(.*\*1)` - // arm64:`CALL\truntime.typeAssert`,`LDAR`,`MOVWU\t16\(R0\)`,`MOVD\t\(R.*\)\(R.*\)` + // amd64:`CALL runtime.typeAssert`,`MOVL 16\(AX\)`,`MOVQ 8\(.*\)(.*\*1)` + // arm64:`CALL runtime.typeAssert`,`LDAR`,`MOVWU 16\(R0\)`,`MOVD \(R.*\)\(R.*\)` return x } @@ -195,7 +195,7 @@ func stringSwitchInlineable(s string) { } } func stringSwitch() { - // amd64:-"CMP",-"CALL" - // arm64:-"CMP",-"CALL" + // amd64:-"CMP" -"CALL" + // arm64:-"CMP" -"CALL" stringSwitchInlineable("foo") } diff --git a/test/codegen/typeswitch.go b/test/codegen/typeswitch.go index 93f8e84269f..db78a6e5abe 100644 --- a/test/codegen/typeswitch.go +++ b/test/codegen/typeswitch.go @@ -37,7 +37,7 @@ func swGYZ[T any](a Ig[T]) { t.Y() case Iz: // amd64:-".*typeAssert" t.Z() - case interface{ G() T }: // amd64:-".*typeAssert",-".*assertE2I\\(",".*assertE2I2" + case interface{ G() T }: // amd64:-".*typeAssert" -".*assertE2I\\(" ".*assertE2I2" t.G() } } @@ -46,7 +46,7 @@ func swE2G[T any](a any) { switch t := a.(type) { case Iy: t.Y() - case Ig[T]: // amd64:-".*assertE2I\\(",".*assertE2I2" + case Ig[T]: // amd64:-".*assertE2I\\(" ".*assertE2I2" t.G() } } @@ -55,7 +55,7 @@ func swI2G[T any](a Ix) { switch t := a.(type) { case Iy: t.Y() - case Ig[T]: // amd64:-".*assertE2I\\(",".*assertE2I2" + case Ig[T]: // amd64:-".*assertE2I\\(" ".*assertE2I2" t.G() } } diff --git a/test/codegen/zerosize.go b/test/codegen/zerosize.go index 86c48194008..1e93260ef5a 100644 --- a/test/codegen/zerosize.go +++ b/test/codegen/zerosize.go @@ -12,24 +12,24 @@ package codegen func zeroSize() { c := make(chan struct{}) - // amd64:`MOVQ\t\$0, 
command-line-arguments\.s\+56\(SP\)` + // amd64:`MOVQ \$0, command-line-arguments\.s\+56\(SP\)` var s *int // force s to be a stack object, also use some (fixed) stack space g(&s, 1, 2, 3, 4, 5) - // amd64:`LEAQ\tcommand-line-arguments\..*\+55\(SP\)` + // amd64:`LEAQ command-line-arguments\..*\+55\(SP\)` c <- noliteral(struct{}{}) } // Like zeroSize, but without hiding the zero-sized struct. func zeroSize2() { c := make(chan struct{}) - // amd64:`MOVQ\t\$0, command-line-arguments\.s\+48\(SP\)` + // amd64:`MOVQ \$0, command-line-arguments\.s\+48\(SP\)` var s *int // force s to be a stack object, also use some (fixed) stack space g(&s, 1, 2, 3, 4, 5) - // amd64:`LEAQ\tcommand-line-arguments\..*stmp_\d+\(SB\)` + // amd64:`LEAQ command-line-arguments\..*stmp_\d+\(SB\)` c <- struct{}{} } diff --git a/test/convert5.go b/test/convert5.go new file mode 100644 index 00000000000..df247ca0b9b --- /dev/null +++ b/test/convert5.go @@ -0,0 +1,268 @@ +// run -gcflags=-d=converthash=qy + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !wasm && !386 && !arm && !mips + +// TODO fix this to work for wasm and 32-bit architectures. +// Doing more than this, however, expands the change. + +package main + +import ( + "fmt" + "runtime" +) + +// This test checks that conversion from floats to (unsigned) 32 and 64-bit +// integers has the same sensible behavior for corner cases, and that the +// conversions to smaller integers agree. Because outliers are platform- +// independent, the "golden test" for smaller integers is more of +// a "gold-ish test" and subject to change. + +//go:noinline +func id[T any](x T) T { + return x +} + +//go:noinline +func want[T comparable](name string, x, y T) { + if x != y { + _, _, line, _ := runtime.Caller(1) + fmt.Println("FAIL at line", line, "var =", name, "got =", x, "want =", y) + } +} + +//go:noinline +func log[T comparable](name string, x T) { + fmt.Println(name, x) +} + +const ( + // pX = max positive signed X bit + // nX = min negative signed X bit + // uX = max unsigned X bit + // tX = two to the X + p32 = 2147483647 + n32 = -2147483648 + u32 = 4294967295 + p64 = 9223372036854775807 + n64 = -9223372036854775808 + u64 = 18446744073709551615 + t44 = 1 << 44 +) + +func main() { + one := 1.0 + minus1_32 := id(float32(-1.0)) + minus1_64 := id(float64(-1.0)) + p32_plus4k_plus1 := id(float32(p32 + 4096 + 1)) // want this to be precise and fit in 24 bits mantissa + p64_plus4k_plus1 := id(float64(p64 + 4096 + 1)) // want this to be precise and fit in 53 bits mantissa + n32_minus4k := id(float32(n32 - 4096)) + n64_minus4k := id(float64(n64 - 4096)) + inf_32 := id(float32(one / 0)) + inf_64 := id(float64(one / 0)) + ninf_32 := id(float32(-one / 0)) + ninf_64 := id(float64(-one / 0)) + + // int32 conversions + int32Tests := []struct { + name string + input any // Use any to handle both float32 and float64 + expected int32 + }{ + {"minus1_32", minus1_32, -1}, + {"minus1_64", minus1_64, -1}, + {"p32_plus4k_plus1", p32_plus4k_plus1, p32}, + {"p64_plus4k_plus1", p64_plus4k_plus1, p32}, + {"n32_minus4k", n32_minus4k, n32}, + {"n64_minus4k", n64_minus4k, n32}, + {"inf_32", inf_32, p32}, + {"inf_64", inf_64, p32}, + {"ninf_32", ninf_32, n32}, + {"ninf_64", ninf_64, n32}, + } + + for _, test := range int32Tests { + var converted int32 + switch v := test.input.(type) { + case float32: + converted = int32(v) + case float64: + converted = int32(v) + } +
want(test.name, converted, test.expected) + } + + // int64 conversions + int64Tests := []struct { + name string + input any + expected int64 + }{ + {"minus1_32", minus1_32, -1}, + {"minus1_64", minus1_64, -1}, + {"p32_plus4k_plus1", p32_plus4k_plus1, p32 + 4096 + 1}, + {"p64_plus4k_plus1", p64_plus4k_plus1, p64}, + {"n32_minus4k", n32_minus4k, n32 - 4096}, + {"n64_minus4k", n64_minus4k, n64}, + {"inf_32", inf_32, p64}, + {"inf_64", inf_64, p64}, + {"ninf_32", ninf_32, n64}, + {"ninf_64", ninf_64, n64}, + } + + for _, test := range int64Tests { + var converted int64 + switch v := test.input.(type) { + case float32: + converted = int64(v) + case float64: + converted = int64(v) + } + want(test.name, converted, test.expected) + } + + // uint32 conversions + uint32Tests := []struct { + name string + input any + expected uint32 + }{ + {"minus1_32", minus1_32, 0}, + {"minus1_64", minus1_64, 0}, + {"p32_plus4k_plus1", p32_plus4k_plus1, p32 + 4096 + 1}, + {"p64_plus4k_plus1", p64_plus4k_plus1, u32}, + {"n32_minus4k", n32_minus4k, 0}, + {"n64_minus4k", n64_minus4k, 0}, + {"inf_32", inf_32, u32}, + {"inf_64", inf_64, u32}, + {"ninf_32", ninf_32, 0}, + {"ninf_64", ninf_64, 0}, + } + + for _, test := range uint32Tests { + var converted uint32 + switch v := test.input.(type) { + case float32: + converted = uint32(v) + case float64: + converted = uint32(v) + } + want(test.name, converted, test.expected) + } + + u64_plus4k_plus1_64 := id(float64(u64 + 4096 + 1)) + u64_plust44_plus1_32 := id(float32(u64 + t44 + 1)) + + // uint64 conversions + uint64Tests := []struct { + name string + input any + expected uint64 + }{ + {"minus1_32", minus1_32, 0}, + {"minus1_64", minus1_64, 0}, + {"p32_plus4k_plus1", p32_plus4k_plus1, p32 + 4096 + 1}, + {"p64_plus4k_plus1", p64_plus4k_plus1, p64 + 4096 + 1}, + {"n32_minus4k", n32_minus4k, 0}, + {"n64_minus4k", n64_minus4k, 0}, + {"inf_32", inf_32, u64}, + {"inf_64", inf_64, u64}, + {"ninf_32", ninf_32, 0}, + {"ninf_64", ninf_64, 0}, + {"u64_plus4k_plus1_64", u64_plus4k_plus1_64, u64}, + {"u64_plust44_plus1_32", u64_plust44_plus1_32, u64}, + } + + for _, test := range uint64Tests { + var converted uint64 + switch v := test.input.(type) { + case float32: + converted = uint64(v) + case float64: + converted = uint64(v) + } + want(test.name, converted, test.expected) + } + + // for smaller integer types + // TODO the overflow behavior is dubious, maybe we should fix it to be more sensible, e.g. saturating. + fmt.Println("Below this are 'golden' results to check for consistency across platforms. 
Overflow behavior is not necessarily what we want") + + u8plus2 := id(float64(257)) + p8minus1 := id(float32(126)) + n8plus2 := id(float64(-126)) + n8minusone := id(float32(-129)) + + fmt.Println("\nuint8 conversions") + uint8Tests := []struct { + name string + input any + }{ + {"minus1_32", minus1_32}, + {"minus1_64", minus1_64}, + {"p32_plus4k_plus1", p32_plus4k_plus1}, + {"p64_plus4k_plus1", p64_plus4k_plus1}, + {"n32_minus4k", n32_minus4k}, + {"n64_minus4k", n64_minus4k}, + {"inf_32", inf_32}, + {"inf_64", inf_64}, + {"ninf_32", ninf_32}, + {"ninf_64", ninf_64}, + {"u64_plus4k_plus1_64", u64_plus4k_plus1_64}, + {"u64_plust44_plus1_32", u64_plust44_plus1_32}, + {"u8plus2", u8plus2}, + {"p8minus1", p8minus1}, + {"n8plus2", n8plus2}, + {"n8minusone", n8minusone}, + } + + for _, test := range uint8Tests { + var converted uint8 + switch v := test.input.(type) { + case float32: + converted = uint8(v) + case float64: + converted = uint8(v) + } + log(test.name, converted) + } + + fmt.Println("\nint8 conversions") + int8Tests := []struct { + name string + input any + }{ + {"minus1_32", minus1_32}, + {"minus1_64", minus1_64}, + {"p32_plus4k_plus1", p32_plus4k_plus1}, + {"p64_plus4k_plus1", p64_plus4k_plus1}, + {"n32_minus4k", n32_minus4k}, + {"n64_minus4k", n64_minus4k}, + {"inf_32", inf_32}, + {"inf_64", inf_64}, + {"ninf_32", ninf_32}, + {"ninf_64", ninf_64}, + {"u64_plus4k_plus1_64", u64_plus4k_plus1_64}, + {"u64_plust44_plus1_32", u64_plust44_plus1_32}, + {"u8plus2", u8plus2}, + {"p8minus1", p8minus1}, + {"n8plus2", n8plus2}, + {"n8minusone", n8minusone}, + } + + for _, test := range int8Tests { + var converted int8 + switch v := test.input.(type) { + case float32: + converted = int8(v) + case float64: + converted = int8(v) + } + log(test.name, converted) + } + +} diff --git a/test/convert5.out b/test/convert5.out new file mode 100644 index 00000000000..47a8af67f96 --- /dev/null +++ b/test/convert5.out @@ -0,0 +1,37 @@ +Below this are 'golden' results to check for consistency across platforms. Overflow behavior is not necessarily what we want + +uint8 conversions +minus1_32 255 +minus1_64 255 +p32_plus4k_plus1 255 +p64_plus4k_plus1 255 +n32_minus4k 0 +n64_minus4k 0 +inf_32 255 +inf_64 255 +ninf_32 0 +ninf_64 0 +u64_plus4k_plus1_64 255 +u64_plust44_plus1_32 255 +u8plus2 1 +p8minus1 126 +n8plus2 130 +n8minusone 127 + +int8 conversions +minus1_32 -1 +minus1_64 -1 +p32_plus4k_plus1 -1 +p64_plus4k_plus1 -1 +n32_minus4k 0 +n64_minus4k 0 +inf_32 -1 +inf_64 -1 +ninf_32 0 +ninf_64 0 +u64_plus4k_plus1_64 -1 +u64_plust44_plus1_32 -1 +u8plus2 1 +p8minus1 126 +n8plus2 -126 +n8minusone 127 diff --git a/test/deferprint.out b/test/deferprint.out index a71cfcebd7e..99f0a515d54 100644 --- a/test/deferprint.out +++ b/test/deferprint.out @@ -1,2 +1,2 @@ printing: 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 -42 true false true +1.500000e+000 world 0x0 [0/0]0x0 0x0 0x0 255 +42 true false true 1.5 world 0x0 [0/0]0x0 0x0 0x0 255 diff --git a/test/devirtualization.go b/test/devirtualization.go new file mode 100644 index 00000000000..edabb94108d --- /dev/null +++ b/test/devirtualization.go @@ -0,0 +1,1283 @@ +// errorcheck -0 -m + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package escape + +type M interface{ M() } + +type A interface{ A() } + +type C interface{ C() } + +type Impl struct{} + +func (*Impl) M() {} // ERROR "can inline \(\*Impl\).M$" + +func (*Impl) A() {} // ERROR "can inline \(\*Impl\).A$" + +type Impl2 struct{} + +func (*Impl2) M() {} // ERROR "can inline \(\*Impl2\).M$" + +func (*Impl2) A() {} // ERROR "can inline \(\*Impl2\).A$" + +type CImpl struct{} + +func (CImpl) C() {} // ERROR "can inline CImpl.C$" + +func typeAsserts() { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + + a.(M).M() // ERROR "devirtualizing a.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + a.(A).A() // ERROR "devirtualizing a.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + a.(*Impl).M() // ERROR "inlining call to \(\*Impl\).M" + a.(*Impl).A() // ERROR "inlining call to \(\*Impl\).A" + + v := a.(M) + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + v.(A).A() // ERROR "devirtualizing v.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + v.(*Impl).A() // ERROR "inlining call to \(\*Impl\).A" + v.(*Impl).M() // ERROR "inlining call to \(\*Impl\).M" + + v2 := a.(A) + v2.A() // ERROR "devirtualizing v2.A to \*Impl$" "inlining call to \(\*Impl\).A" + v2.(M).M() // ERROR "devirtualizing v2.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + v2.(*Impl).A() // ERROR "inlining call to \(\*Impl\).A" + v2.(*Impl).M() // ERROR "inlining call to \(\*Impl\).M" + + a.(M).(A).A() // ERROR "devirtualizing a.\(M\).\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + a.(A).(M).M() // ERROR "devirtualizing a.\(A\).\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + + a.(M).(A).(*Impl).A() // ERROR "inlining call to \(\*Impl\).A" + a.(A).(M).(*Impl).M() // ERROR "inlining call to \(\*Impl\).M" + + any(a).(M).M() // ERROR "devirtualizing any\(a\).\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + any(a).(A).A() // ERROR "devirtualizing any\(a\).\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + any(a).(M).(any).(A).A() // ERROR "devirtualizing any\(a\).\(M\).\(any\).\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + + c := any(a) + c.(A).A() // ERROR "devirtualizing c.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + c.(M).M() // ERROR "devirtualizing c.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + + M(a).M() // ERROR "devirtualizing M\(a\).M to \*Impl$" "inlining call to \(\*Impl\).M" + M(M(a)).M() // ERROR "devirtualizing M\(M\(a\)\).M to \*Impl$" "inlining call to \(\*Impl\).M" + + a2 := a.(A) + A(a2).A() // ERROR "devirtualizing A\(a2\).A to \*Impl$" "inlining call to \(\*Impl\).A" + A(A(a2)).A() // ERROR "devirtualizing A\(A\(a2\)\).A to \*Impl$" "inlining call to \(\*Impl\).A" + + { + var a C = &CImpl{} // ERROR "&CImpl{} does not escape$" + a.(any).(C).C() // ERROR "devirtualizing a.\(any\).\(C\).C to \*CImpl$" "inlining call to CImpl.C" + a.(any).(*CImpl).C() // ERROR "inlining call to CImpl.C" + } +} + +func typeAssertsWithOkReturn() { + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + if v, ok := a.(M); ok { + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + } + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + if v, ok := a.(A); ok { + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + } + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + v, ok := a.(M) + if ok { + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + } + { + var a M = &Impl{} // ERROR "&Impl{} does not 
escape$" + v, ok := a.(A) + if ok { + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + } + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + v, ok := a.(*Impl) + if ok { + v.A() // ERROR "inlining call to \(\*Impl\).A" + v.M() // ERROR "inlining call to \(\*Impl\).M" + } + } + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + v, _ := a.(M) + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + v, _ := a.(A) + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + v, _ := a.(*Impl) + v.A() // ERROR "inlining call to \(\*Impl\).A" + v.M() // ERROR "inlining call to \(\*Impl\).M" + } + { + a := newM() // ERROR "&Impl{} does not escape$" "inlining call to newM" + callA(a) // ERROR "devirtualizing m.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" "inlining call to callA" + callIfA(a) // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" "inlining call to callIfA" + } + { + _, a := newM2ret() // ERROR "&Impl{} does not escape$" "inlining call to newM2ret" + callA(a) // ERROR "devirtualizing m.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" "inlining call to callA" + callIfA(a) // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" "inlining call to callIfA" + } + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + // Note the !ok condition, devirtualizing here is fine. + if v, ok := a.(M); !ok { + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + } + { + var a A = newImplNoInline() + if v, ok := a.(M); ok { + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + } + { + var impl2InA A = &Impl2{} // ERROR "&Impl2{} does not escape$" + var a A + a, _ = impl2InA.(*Impl) + // a now contains the zero value of *Impl + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + a := newANoInline() + a.A() + } + { + _, a := newANoInlineRet2() + a.A() + } +} + +func newM() M { // ERROR "can inline newM$" + return &Impl{} // ERROR "&Impl{} escapes to heap$" +} + +func newM2ret() (int, M) { // ERROR "can inline newM2ret$" + return -1, &Impl{} // ERROR "&Impl{} escapes to heap$" +} + +func callA(m M) { // ERROR "can inline callA$" "leaking param: m$" + m.(A).A() +} + +func callIfA(m M) { // ERROR "can inline callIfA$" "leaking param: m$" + if v, ok := m.(A); ok { + v.A() + } +} + +//go:noinline +func newImplNoInline() *Impl { + return &Impl{} // ERROR "&Impl{} escapes to heap$" +} + +//go:noinline +func newImpl2ret2() (string, *Impl2) { + return "str", &Impl2{} // ERROR "&Impl2{} escapes to heap$" +} + +//go:noinline +func newImpl2() *Impl2 { + return &Impl2{} // ERROR "&Impl2{} escapes to heap$" +} + +//go:noinline +func newANoInline() A { + return &Impl{} // ERROR "&Impl{} escapes to heap$" +} + +//go:noinline +func newANoInlineRet2() (string, A) { + return "", &Impl{} // ERROR "&Impl{} escapes to heap$" +} + +func testTypeSwitch() { + { + var v A = &Impl{} // ERROR "&Impl{} does not escape$" + switch v := v.(type) { + case A: + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + case M: + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + } + { + var v A = &Impl{} // ERROR "&Impl{} does not escape$" + switch v := v.(type) { + case A: + v.A() // ERROR 
"devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + case M: + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + v = &Impl{} // ERROR "&Impl{} does not escape$" + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + v.(M).M() // ERROR "devirtualizing v.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + } + { + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + switch v1 := v.(type) { + case A: + v1.A() + case M: + v1.M() + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + } + } + { + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + switch v := v.(type) { + case A: + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + case M: + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + case C: + v.C() + } + } + { + var v A = &Impl{} // ERROR "&Impl{} does not escape$" + switch v := v.(type) { + case M: + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + default: + panic("does not implement M") // ERROR ".does not implement M. escapes to heap$" + } + } +} + +func differentTypeAssign() { + { + var a A + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + a = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + a.A() + } + { + a := A(&Impl{}) // ERROR "&Impl{} escapes to heap$" + a = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + a.A() + } + { + a := A(&Impl{}) // ERROR "&Impl{} escapes to heap$" + a.A() + a = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + } + { + a := A(&Impl{}) // ERROR "&Impl{} escapes to heap$" + a = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + var asAny any = a + asAny.(A).A() + } + { + a := A(&Impl{}) // ERROR "&Impl{} escapes to heap$" + var asAny any = a + asAny = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + asAny.(A).A() + } + { + a := A(&Impl{}) // ERROR "&Impl{} escapes to heap$" + var asAny any = a + asAny.(A).A() + asAny = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var a A + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + a = newImpl2() + a.A() + } + { + var a A + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + _, a = newImpl2ret2() + a.A() + } +} + +func assignWithTypeAssert() { + { + var i1 A = &Impl{} // ERROR "&Impl{} does not escape$" + var i2 A = &Impl2{} // ERROR "&Impl2{} does not escape$" + i1 = i2.(*Impl) // this will panic + i1.A() // ERROR "devirtualizing i1.A to \*Impl$" "inlining call to \(\*Impl\).A" + i2.A() // ERROR "devirtualizing i2.A to \*Impl2$" "inlining call to \(\*Impl2\).A" + } + { + var i1 A = &Impl{} // ERROR "&Impl{} does not escape$" + var i2 A = &Impl2{} // ERROR "&Impl2{} does not escape$" + i1, _ = i2.(*Impl) // i1 is going to be nil + i1.A() // ERROR "devirtualizing i1.A to \*Impl$" "inlining call to \(\*Impl\).A" + i2.A() // ERROR "devirtualizing i2.A to \*Impl2$" "inlining call to \(\*Impl2\).A" + } +} + +func nilIface() { + { + var v A = &Impl{} // ERROR "&Impl{} does not escape$" + v = nil + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var v A = &Impl{} // ERROR "&Impl{} does not escape$" + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + v = nil + } + { + var nilIface A + var v A = &Impl{} // ERROR "&Impl{} does not escape$" + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + v = nilIface + } + { + var nilIface A + var v A = &Impl{} // ERROR "&Impl{} 
does not escape$" + v = nilIface + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var v A + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + v = &Impl{} // ERROR "&Impl{} does not escape$" + } + { + var v A + var v2 A = v + v2.A() // ERROR "devirtualizing v2.A to \*Impl$" "inlining call to \(\*Impl\).A" + v2 = &Impl{} // ERROR "&Impl{} does not escape$" + } + { + var v A + v.A() + } + { + var v A + var v2 A = v + v2.A() + } + { + var v A + var v2 A + v2 = v + v2.A() + } +} + +func longDevirtTest() { + var a interface { + M + A + } = &Impl{} // ERROR "&Impl{} does not escape$" + + { + var b A = a + b.A() // ERROR "devirtualizing b.A to \*Impl$" "inlining call to \(\*Impl\).A" + b.(M).M() // ERROR "devirtualizing b.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + } + { + var b M = a + b.M() // ERROR "devirtualizing b.M to \*Impl$" "inlining call to \(\*Impl\).M" + b.(A).A() // ERROR "devirtualizing b.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var b A = a.(M).(A) + b.A() // ERROR "devirtualizing b.A to \*Impl$" "inlining call to \(\*Impl\).A" + b.(M).M() // ERROR "devirtualizing b.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + } + { + var b M = a.(A).(M) + b.M() // ERROR "devirtualizing b.M to \*Impl$" "inlining call to \(\*Impl\).M" + b.(A).A() // ERROR "devirtualizing b.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + } + + if v, ok := a.(A); ok { + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + + if v, ok := a.(M); ok { + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + + { + var c A = a + + if v, ok := c.(A); ok { + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + + c = &Impl{} // ERROR "&Impl{} does not escape$" + + if v, ok := c.(M); ok { + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + + if v, ok := c.(interface { + A + M + }); ok { + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + } +} + +func deferDevirt() { + var a A + defer func() { // ERROR "can inline deferDevirt.func1$" "func literal does not escape$" + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + }() + a = &Impl{} // ERROR "&Impl{} does not escape$" + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" +} + +func deferNoDevirt() { + var a A + defer func() { // ERROR "can inline deferNoDevirt.func1$" "func literal does not escape$" + a = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + }() + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + a.A() +} + +//go:noinline +func closureDevirt() { + var a A + func() { // ERROR "func literal does not escape$" + // defer so that it does not lnline. + defer func() {}() // ERROR "can inline closureDevirt.func1.1$" "func literal does not escape$" + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + }() + a = &Impl{} // ERROR "&Impl{} does not escape$" + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" +} + +//go:noinline +func closureNoDevirt() { + var a A + func() { // ERROR "func literal does not escape$" + // defer so that it does not lnline. 
+ defer func() {}() // ERROR "can inline closureNoDevirt.func1.1$" "func literal does not escape$" + a = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + }() + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + a.A() +} + +var global = "1" + +func closureDevirt2() { + var a A + a = &Impl{} // ERROR "&Impl{} does not escape$" + c := func() { // ERROR "can inline closureDevirt2.func1$" "func literal does not escape$" + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + } + if global == "1" { + c = func() { // ERROR "can inline closureDevirt2.func2$" "func literal does not escape$" + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + } + } + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + c() +} + +func closureNoDevirt2() { + var a A + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + c := func() { // ERROR "can inline closureNoDevirt2.func1$" "func literal does not escape$" + a = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + } + if global == "1" { + c = func() { // ERROR "can inline closureNoDevirt2.func2$" "func literal does not escape$" + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + } + } + a.A() + c() +} + +//go:noinline +func closureDevirt3() { + var a A = &Impl{} // ERROR "&Impl{} does not escape$" + func() { // ERROR "func literal does not escape$" + // defer so that it does not inline. + defer func() {}() // ERROR "can inline closureDevirt3.func1.1$" "func literal does not escape$" + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + }() + func() { // ERROR "can inline closureDevirt3.func2$" + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + }() // ERROR "inlining call to closureDevirt3.func2" "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" +} + +//go:noinline +func closureNoDevirt3() { + var a A = &Impl{} // ERROR "&Impl{} escapes to heap$" + func() { // ERROR "func literal does not escape$" + // defer so that it does not inline.
+		defer func() {}() // ERROR "can inline closureNoDevirt3.func1.1$" "func literal does not escape$"
+		a.A()
+	}()
+	func() { // ERROR "can inline closureNoDevirt3.func2$"
+		a.A()
+	}() // ERROR "inlining call to closureNoDevirt3.func2"
+	a = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+}
+
+//go:noinline
+func varDeclaredInClosureReferencesOuter() {
+	var a A = &Impl{} // ERROR "&Impl{} does not escape$"
+	func() { // ERROR "func literal does not escape$"
+		// defer for noinline
+		defer func() {}() // ERROR "can inline varDeclaredInClosureReferencesOuter.func1.1$" "func literal does not escape$"
+		var v A = a
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}()
+	func() { // ERROR "func literal does not escape$"
+		// defer for noinline
+		defer func() {}() // ERROR "can inline varDeclaredInClosureReferencesOuter.func2.1$" "func literal does not escape$"
+		var v A = a
+		v = &Impl{} // ERROR "&Impl{} does not escape$"
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}()
+
+	var b A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+	func() { // ERROR "func literal does not escape$"
+		// defer for noinline
+		defer func() {}() // ERROR "can inline varDeclaredInClosureReferencesOuter.func3.1$" "func literal does not escape$"
+		var v A = b
+		v = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		v.A()
+	}()
+	func() { // ERROR "func literal does not escape$"
+		// defer for noinline
+		defer func() {}() // ERROR "can inline varDeclaredInClosureReferencesOuter.func4.1$" "func literal does not escape$"
+		var v A = b
+		v.A()
+		v = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+	}()
+}
+
+//go:noinline
+func testNamedReturn0() (v A) {
+	v = &Impl{} // ERROR "&Impl{} escapes to heap$"
+	v.A()
+	return
+}
+
+//go:noinline
+func testNamedReturn1() (v A) {
+	v = &Impl{} // ERROR "&Impl{} escapes to heap$"
+	v.A()
+	return &Impl{} // ERROR "&Impl{} escapes to heap$"
+}
+
+func testNamedReturns3() (v A) {
+	v = &Impl{} // ERROR "&Impl{} escapes to heap$"
+	defer func() { // ERROR "can inline testNamedReturns3.func1$" "func literal does not escape$"
+		v.A()
+	}()
+	v.A()
+	return &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+}
+
+var (
+	globalImpl  = &Impl{}
+	globalImpl2 = &Impl2{}
+	globalA     A = &Impl{}
+	globalM     M = &Impl{}
+)
+
+func globals() {
+	{
+		globalA.A()
+		globalA.(M).M()
+		globalM.M()
+		globalM.(A).A()
+
+		a := globalA
+		a.A()
+		a.(M).M()
+
+		m := globalM
+		m.M()
+		m.(A).A()
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} does not escape$"
+		a = globalImpl
+		a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} does not escape$"
+		a = A(globalImpl)
+		a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} does not escape$"
+		a = M(globalImpl).(A)
+		a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} does not escape$"
+		a = globalA.(*Impl)
+		a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		a = globalM.(*Impl)
+		a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		a = globalImpl2
+		a.A()
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		a = globalA
+		a.A()
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		a = globalM.(A)
+		a.A()
+	}
+}
+
+func mapsDevirt() {
+	{
+		m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$"
+		var v A = m[0]
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		v.(M).M() // ERROR "devirtualizing v.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M"
+	}
+	{
+		m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$"
+		var v A
+		var ok bool
+		if v, ok = m[0]; ok {
+			v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		}
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$"
+		var v A
+		v, _ = m[0]
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+}
+
+func mapsNoDevirt() {
+	{
+		m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$"
+		var v A = m[0]
+		v.A()
+		v = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		v.(M).M()
+	}
+	{
+		m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$"
+		var v A
+		var ok bool
+		if v, ok = m[0]; ok {
+			v.A()
+		}
+		v = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		v.A()
+	}
+	{
+		m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$"
+		var v A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		v, _ = m[0]
+		v = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		v.A()
+	}
+
+	{
+		m := make(map[int]A) // ERROR "make\(map\[int\]A\) does not escape$"
+		var v A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		v = m[0]
+		v.A()
+	}
+	{
+		m := make(map[int]A) // ERROR "make\(map\[int\]A\) does not escape$"
+		var v A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		var ok bool
+		if v, ok = m[0]; ok {
+			v.A()
+		}
+		v.A()
+	}
+	{
+		m := make(map[int]A) // ERROR "make\(map\[int\]A\) does not escape$"
+		var v A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		v, _ = m[0]
+		v.A()
+	}
+}
+
+func chanDevirt() {
+	{
+		m := make(chan *Impl)
+		var v A = <-m
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		m := make(chan *Impl)
+		var v A
+		v = <-m
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		m := make(chan *Impl)
+		var v A
+		v, _ = <-m
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		m := make(chan *Impl)
+		var v A
+		var ok bool
+		if v, ok = <-m; ok {
+			v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		}
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		m := make(chan *Impl)
+		var v A
+		var ok bool
+		if v, ok = <-m; ok {
+			v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		}
+		select {
+		case <-m:
+			v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		case v = <-m:
+			v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		case v, ok = <-m:
+			v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		}
+	}
+}
+
+func chanNoDevirt() {
+	{
+		m := make(chan *Impl)
+		var v A = <-m
+		v = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		v.A()
+	}
+	{
+		m := make(chan *Impl)
+		var v A
+		v = <-m
+		v = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		v.A()
+	}
+	{
+		m := make(chan *Impl)
+		var v A
+		v, _ = <-m
+		v = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		v.A()
+	}
+	{
+		m := make(chan *Impl)
+		var v A
+		var ok bool
+		if v, ok = <-m; ok {
+			v.A()
+		}
+		v = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		v.A()
+	}
+	{
+		m := make(chan *Impl)
+		var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		var ok bool
+		if v, ok = <-m; ok {
+			v.A()
+		}
+	}
+	{
+		m := make(chan *Impl)
+		var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		select {
+		case v = <-m:
+			v.A()
+		}
+		v.A()
+	}
+	{
+		m := make(chan *Impl)
+		var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		select {
+		case v, _ = <-m:
+			v.A()
+		}
+		v.A()
+	}
+
+	{
+		m := make(chan A)
+		var v A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		v = <-m
+		v.A()
+	}
+	{
+		m := make(chan A)
+		var v A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		v, _ = <-m
+		v.A()
+	}
+	{
+		m := make(chan A)
+		var v A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		var ok bool
+		if v, ok = <-m; ok {
+			v.A()
+		}
+	}
+	{
+		m := make(chan A)
+		var v A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		select {
+		case v = <-m:
+			v.A()
+		}
+		v.A()
+	}
+	{
+		m := make(chan A)
+		var v A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		select {
+		case v, _ = <-m:
+			v.A()
+		}
+		v.A()
+	}
+}
+
+func rangeDevirt() {
+	{
+		var v A
+		m := make(map[*Impl]struct{}) // ERROR "make\(map\[\*Impl\]struct {}\) does not escape$"
+		v = &Impl{} // ERROR "&Impl{} does not escape$"
+		for v = range m {
+		}
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var v A
+		m := make(map[*Impl]*Impl) // ERROR "make\(map\[\*Impl\]\*Impl\) does not escape$"
+		v = &Impl{} // ERROR "&Impl{} does not escape$"
+		for v = range m {
+		}
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var v A
+		m := make(map[*Impl]*Impl) // ERROR "make\(map\[\*Impl\]\*Impl\) does not escape$"
+		v = &Impl{} // ERROR "&Impl{} does not escape$"
+		for _, v = range m {
+		}
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var v A
+		m := make(chan *Impl)
+		v = &Impl{} // ERROR "&Impl{} does not escape$"
+		for v = range m {
+		}
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var v A
+		m := []*Impl{} // ERROR "\[\]\*Impl{} does not escape$"
+		v = &Impl{} // ERROR "&Impl{} does not escape$"
+		for _, v = range m {
+		}
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var v A
+		v = &Impl{} // ERROR "&Impl{} does not escape$"
+		impl := &Impl{} // ERROR "&Impl{} does not escape$"
+		i := 0
+		for v = impl; i < 10; i++ {
+		}
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var v A
+		v = &Impl{} // ERROR "&Impl{} does not escape$"
+		impl := &Impl{} // ERROR "&Impl{} does not escape$"
+		i := 0
+		for v = impl; i < 10; i++ {
+		}
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var v A
+		m := [1]*Impl{&Impl{}} // ERROR "&Impl{} does not escape$"
+		v = &Impl{} // ERROR "&Impl{} does not escape$"
+		for _, v = range m {
+		}
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var v A
+		m := [1]*Impl{&Impl{}} // ERROR "&Impl{} does not escape$"
+		v = &Impl{} // ERROR "&Impl{} does not escape$"
+		for _, v = range &m {
+		}
+		v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+}
+
+func rangeNoDevirt() {
+	{
+		var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		m := make(map[*Impl]struct{}) // ERROR "make\(map\[\*Impl\]struct {}\) does not escape$"
+		for v = range m {
+		}
+		v.A()
+	}
+	{
+		var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		m := make(map[*Impl]*Impl) // ERROR "make\(map\[\*Impl\]\*Impl\) does not escape$"
+		for v = range m {
+		}
+		v.A()
+	}
+	{
+		var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		m := make(map[*Impl]*Impl) // ERROR "make\(map\[\*Impl\]\*Impl\) does not escape$"
+		for _, v = range m {
+		}
+		v.A()
+	}
+	{
+		var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		m := make(chan *Impl)
+		for v = range m {
+		}
+		v.A()
+	}
+	{
+		var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		m := []*Impl{} // ERROR "\[\]\*Impl{} does not escape$"
+		for _, v = range m {
+		}
+		v.A()
+	}
+	{
+		var v A
+		v = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		impl := &Impl{} // ERROR "&Impl{} escapes to heap$"
+		i := 0
+		for v = impl; i < 10; i++ {
+		}
+		v.A()
+	}
+	{
+		var v A
+		v = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		impl := &Impl{} // ERROR "&Impl{} escapes to heap$"
+		i := 0
+		for v = impl; i < 10; i++ {
+		}
+		v.A()
+	}
+	{
+		var v A
+		m := [1]*Impl{&Impl{}} // ERROR "&Impl{} escapes to heap$"
+		v = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		for _, v = range m {
+		}
+		v.A()
+	}
+	{
+		var v A
+		m := [1]*Impl{&Impl{}} // ERROR "&Impl{} escapes to heap$"
+		v = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		for _, v = range &m {
+		}
+		v.A()
+	}
+
+	{
+		var v A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		m := make(map[A]struct{}) // ERROR "make\(map\[A\]struct {}\) does not escape$"
+		for v = range m {
+		}
+		v.A()
+	}
+	{
+		var v A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		m := make(map[A]A) // ERROR "make\(map\[A\]A\) does not escape$"
+		for v = range m {
+		}
+		v.A()
+	}
+	{
+		var v A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		m := make(map[A]A) // ERROR "make\(map\[A\]A\) does not escape$"
+		for _, v = range m {
+		}
+		v.A()
+	}
+	{
+		var v A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		m := make(chan A)
+		for v = range m {
+		}
+		v.A()
+	}
+	{
+		var v A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		m := []A{} // ERROR "\[\]A{} does not escape$"
+		for _, v = range m {
+		}
+		v.A()
+	}
+
+	{
+		var v A
+		m := [1]A{&Impl{}} // ERROR "&Impl{} escapes to heap$"
+		v = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		for _, v = range m {
+		}
+		v.A()
+	}
+	{
+		var v A
+		m := [1]A{&Impl{}} // ERROR "&Impl{} escapes to heap$"
+		v = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		for _, v = range &m {
+		}
+		v.A()
+	}
+}
+
+var globalInt = 1
+
+func testIfInit() {
+	{
+		var a A = &Impl{} // ERROR "&Impl{} does not escape$"
+		var i = &Impl{} // ERROR "&Impl{} does not escape$"
+		if a = i; globalInt == 1 {
+			a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		}
+		a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		a.(M).M() // ERROR "devirtualizing a.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M"
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		var i2 = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		if a = i2; globalInt == 1 {
+			a.A()
+		}
+		a.A()
+	}
+}
+
+func testSwitchInit() {
+	{
+		var a A = &Impl{} // ERROR "&Impl{} does not escape$"
+		var i = &Impl{} // ERROR "&Impl{} does not escape$"
+		switch a = i; globalInt {
+		case 12:
+			a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		}
+		a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+		a.(M).M() // ERROR "devirtualizing a.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M"
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		var i2 = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		switch a = i2; globalInt {
+		case 12:
+			a.A()
+		}
+		a.A()
+	}
+}
+
+type implWrapper Impl
+
+func (implWrapper) A() {} // ERROR "can inline implWrapper.A$"
+
+//go:noinline
+func devirtWrapperType() {
+	{
+		i := &Impl{} // ERROR "&Impl{} does not escape$"
+		// This is an OCONVNOP, so we have to be careful not to devirtualize it to Impl.A.
+		var a A = (*implWrapper)(i)
+		a.A() // ERROR "devirtualizing a.A to \*implWrapper$" "inlining call to implWrapper.A"
+	}
+	{
+		i := Impl{}
+		// This is an OCONVNOP, so we have to be careful not to devirtualize it to Impl.A.
+		var a A = (implWrapper)(i) // ERROR "implWrapper\(i\) does not escape$"
+		a.A() // ERROR "devirtualizing a.A to implWrapper$" "inlining call to implWrapper.A"
+	}
+	{
+		type anyWrapper any
+		var foo any = &Impl{} // ERROR "&Impl\{\} does not escape"
+		var bar anyWrapper = foo
+		bar.(M).M() // ERROR "devirtualizing bar\.\(M\).M to \*Impl" "inlining call to \(\*Impl\)\.M"
+	}
+}
+
+func selfAssigns() {
+	{
+		var a A = &Impl{} // ERROR "&Impl{} does not escape$"
+		a = a
+		a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} does not escape"
+		var asAny any = a
+		asAny = asAny
+		asAny.(A).A() // ERROR "devirtualizing asAny.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} does not escape"
+		var asAny any = a
+		a = asAny.(A)
+		asAny.(A).A() // ERROR "devirtualizing asAny.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A"
+		a.(A).A() // ERROR "devirtualizing a.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A"
+		b := a
+		b.(A).A() // ERROR "devirtualizing b.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} does not escape"
+		var asAny any = a
+		asAny = asAny
+		a = asAny.(A)
+		asAny = a
+		asAny.(A).A() // ERROR "devirtualizing asAny.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A"
+		asAny.(M).M() // ERROR "devirtualizing asAny.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M"
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} does not escape"
+		var asAny A = a
+		a = asAny.(A)
+		a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+	{
+		var a, b, c A
+		c = &Impl{} // ERROR "&Impl{} does not escape$"
+		a = c
+		c = b
+		b = c
+		a = b
+		b = a
+		c = a
+		a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A"
+	}
+}
+
+func boolNoDevirt() {
+	{
+		m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$"
+		var v any = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		_, v = m[0] // ERROR ".autotmp_[0-9]+ escapes to heap$"
+		v.(A).A()
+	}
+	{
+		m := make(chan *Impl)
+		var v any = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		select {
+		case _, v = <-m: // ERROR ".autotmp_[0-9]+ escapes to heap$"
+		}
+		v.(A).A()
+	}
+	{
+		m := make(chan *Impl)
+		var v any = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		_, v = <-m // ERROR ".autotmp_[0-9]+ escapes to heap$"
+		v.(A).A()
+	}
+	{
+		var a any = 4 // ERROR "4 does not escape$"
+		var v any = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		_, v = a.(int) // ERROR ".autotmp_[0-9]+ escapes to heap$"
+		v.(A).A()
+	}
+}
+
+func addrTaken() {
+	{
+		var a A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		var ptrA = &a
+		a.A()
+		_ = ptrA
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		var ptrA = &a
+		*ptrA = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		a.A()
+	}
+	{
+		var a A = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		var ptrA = &a
+		*ptrA = &Impl2{} // ERROR "&Impl2{} escapes to heap$"
+		a.A()
+	}
+}
+
+func testInvalidAsserts() {
+	any(0).(interface{ A() }).A() // ERROR "any\(0\) escapes to heap$"
+	{
+		var a M = &Impl{} // ERROR "&Impl{} escapes to heap$"
+		a.(C).C() // this will panic
+		a.(any).(C).C() // this will panic
+	}
+	{
+		var a C = &CImpl{} // ERROR "&CImpl{} escapes to heap$"
+		a.(M).M() // this will panic
+		a.(any).(M).M() // this will panic
+	}
+	{
+		var a C = &CImpl{} // ERROR "&CImpl{} does not escape$"
+
+		// this will panic
+		a.(M).(*Impl).M() // ERROR "inlining call to \(\*Impl\).M"
+
+		// this will panic
+		a.(any).(M).(*Impl).M() // ERROR "inlining call to \(\*Impl\).M"
+	}
+}
+
+type namedBool bool
+
+func (namedBool) M() {} // ERROR "can inline namedBool.M$"
+
+//go:noinline
+func namedBoolTest() {
+	m := map[int]int{} // ERROR "map\[int\]int{} does not escape"
+	var ok namedBool
+	_, ok = m[5]
+	var i M = ok // ERROR "ok does not escape"
+	i.M() // ERROR "devirtualizing i.M to namedBool$" "inlining call to namedBool.M"
+}
diff --git a/test/devirtualization_nil_panics.go b/test/devirtualization_nil_panics.go
new file mode 100644
index 00000000000..59da454be7f
--- /dev/null
+++ b/test/devirtualization_nil_panics.go
@@ -0,0 +1,100 @@
+// run
+
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"fmt"
+	"runtime"
+	"strings"
+)
+
+type A interface{ A() }
+
+type Impl struct{}
+
+func (*Impl) A() {}
+
+type Impl2 struct{}
+
+func (*Impl2) A() {}
+
+func main() {
+	shouldNilPanic(28, func() {
+		var v A
+		v.A()
+		v = &Impl{}
+	})
+	shouldNilPanic(36, func() {
+		var v A
+		defer func() {
+			v = &Impl{}
+		}()
+		v.A()
+	})
+	shouldNilPanic(43, func() {
+		var v A
+		f := func() {
+			v = &Impl{}
+		}
+		v.A()
+		f()
+	})
+
+	// Make sure that both devirtualized and non-devirtualized
+	// variants have the panic at the same line.
+	shouldNilPanic(55, func() {
+		var v A
+		defer func() {
+			v = &Impl{}
+		}()
+		v. // A() is on a separate line
+			A()
+	})
+	shouldNilPanic(64, func() {
+		var v A
+		defer func() {
+			v = &Impl{}
+			v = &Impl2{} // assign different type, such that the call below does not get devirtualized
+		}()
+		v. // A() is on a separate line
+			A()
+	})
+}
+
+var cnt = 0
+
+func shouldNilPanic(wantLine int, f func()) {
+	cnt++
+	defer func() {
+		p := recover()
+		if p == nil {
+			panic("no nil deref panic")
+		}
+		if strings.Contains(fmt.Sprintf("%s", p), "invalid memory address or nil pointer dereference") {
+			callers := make([]uintptr, 128)
+			n := runtime.Callers(0, callers)
+			callers = callers[:n]
+
+			frames := runtime.CallersFrames(callers)
+			line := -1
+			for f, next := frames.Next(); next; f, next = frames.Next() {
+				if f.Func.Name() == fmt.Sprintf("main.main.func%v", cnt) {
+					line = f.Line
+					break
+				}
+			}
+
+			if line != wantLine {
+				panic(fmt.Sprintf("invalid line number in panic = %v; want = %v", line, wantLine))
+			}
+
+			return
+		}
+		panic(p)
+	}()
+	f()
+}
diff --git a/test/devirtualization_with_type_assertions_interleaved.go b/test/devirtualization_with_type_assertions_interleaved.go
new file mode 100644
index 00000000000..6bad3beb9aa
--- /dev/null
+++ b/test/devirtualization_with_type_assertions_interleaved.go
@@ -0,0 +1,139 @@
+// errorcheck -0 -m
+
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+type hashIface interface {
+	Sum() []byte
+}
+
+type cloneableHashIface interface {
+	hashIface
+	Clone() hashIface
+}
+
+type hash struct{ state [32]byte }
+
+func (h *hash) Sum() []byte { // ERROR "can inline \(\*hash\).Sum$" "h does not escape$"
+	return make([]byte, 32) // ERROR "make\(\[\]byte, 32\) escapes to heap$"
+}
+
+func (h *hash) Clone() hashIface { // ERROR "can inline \(\*hash\).Clone$" "h does not escape$"
+	c := *h // ERROR "moved to heap: c$"
+	return &c
+}
+
+type hash2 struct{ state [32]byte }
+
+func (h *hash2) Sum() []byte { // ERROR "can inline \(\*hash2\).Sum$" "h does not escape$"
+	return make([]byte, 32) // ERROR "make\(\[\]byte, 32\) escapes to heap$"
+}
+
+func (h *hash2) Clone() hashIface { // ERROR "can inline \(\*hash2\).Clone$" "h does not escape$"
+	c := *h // ERROR "moved to heap: c$"
+	return &c
+}
+
+func newHash() hashIface { // ERROR "can inline newHash$"
+	return &hash{} // ERROR "&hash{} escapes to heap$"
+}
+
+func cloneHash1(h hashIface) hashIface { // ERROR "can inline cloneHash1$" "leaking param: h$"
+	if h, ok := h.(cloneableHashIface); ok {
+		return h.Clone()
+	}
+	return &hash{} // ERROR "&hash{} escapes to heap$"
+}
+
+func cloneHash2(h hashIface) hashIface { // ERROR "can inline cloneHash2$" "leaking param: h$"
+	if h, ok := h.(cloneableHashIface); ok {
+		return h.Clone()
+	}
+	return nil
+}
+
+func cloneHash3(h hashIface) hashIface { // ERROR "can inline cloneHash3$" "leaking param: h$"
+	if h, ok := h.(cloneableHashIface); ok {
+		return h.Clone()
+	}
+	return &hash2{} // ERROR "&hash2{} escapes to heap$"
+}
+
+func cloneHashWithBool1(h hashIface) (hashIface, bool) { // ERROR "can inline cloneHashWithBool1$" "leaking param: h$"
+	if h, ok := h.(cloneableHashIface); ok {
+		return h.Clone(), true
+	}
+	return &hash{}, false // ERROR "&hash{} escapes to heap$"
+}
+
+func cloneHashWithBool2(h hashIface) (hashIface, bool) { // ERROR "can inline cloneHashWithBool2$" "leaking param: h$"
+	if h, ok := h.(cloneableHashIface); ok {
+		return h.Clone(), true
+	}
+	return nil, false
+}
+
+func cloneHashWithBool3(h hashIface) (hashIface, bool) { // ERROR "can inline cloneHashWithBool3$" "leaking param: h$"
+	if h, ok := h.(cloneableHashIface); ok {
+		return h.Clone(), true
+	}
+	return &hash2{}, false // ERROR "&hash2{} escapes to heap$"
+}
+
+func interleavedWithTypeAssertions() {
+	h1 := newHash() // ERROR "&hash{} does not escape$" "inlining call to newHash"
+	_ = h1.Sum() // ERROR "devirtualizing h1.Sum to \*hash$" "inlining call to \(\*hash\).Sum" "make\(\[\]byte, 32\) does not escape$"
+
+	h2 := cloneHash1(h1) // ERROR "&hash{} does not escape$" "devirtualizing h.Clone to \*hash$" "inlining call to \(\*hash\).Clone" "inlining call to cloneHash1"
+	_ = h2.Sum() // ERROR "devirtualizing h2.Sum to \*hash$" "inlining call to \(\*hash\).Sum" "make\(\[\]byte, 32\) does not escape$"
+
+	h3 := cloneHash2(h1) // ERROR "devirtualizing h.Clone to \*hash$" "inlining call to \(\*hash\).Clone" "inlining call to cloneHash2"
+	_ = h3.Sum() // ERROR "devirtualizing h3.Sum to \*hash$" "inlining call to \(\*hash\).Sum" "make\(\[\]byte, 32\) does not escape$"
+
+	h4 := cloneHash3(h1) // ERROR "&hash2{} escapes to heap$" "devirtualizing h.Clone to \*hash$" "inlining call to \(\*hash\).Clone" "inlining call to cloneHash3" "moved to heap: c$"
+	_ = h4.Sum()
+
+	h5, _ := cloneHashWithBool1(h1) // ERROR "&hash{} does not escape$" "devirtualizing h.Clone to \*hash$" "inlining call to \(\*hash\).Clone" "inlining call to cloneHashWithBool1"
+	_ = h5.Sum() // ERROR "devirtualizing h5.Sum to \*hash$" "inlining call to \(\*hash\).Sum" "make\(\[\]byte, 32\) does not escape$"
+
+	h6, _ := cloneHashWithBool2(h1) // ERROR "devirtualizing h.Clone to \*hash$" "inlining call to \(\*hash\).Clone" "inlining call to cloneHashWithBool2"
+	_ = h6.Sum() // ERROR "devirtualizing h6.Sum to \*hash$" "inlining call to \(\*hash\).Sum" "make\(\[\]byte, 32\) does not escape$"
+
+	h7, _ := cloneHashWithBool3(h1) // ERROR "&hash2{} escapes to heap$" "devirtualizing h.Clone to \*hash$" "inlining call to \(\*hash\).Clone" "inlining call to cloneHashWithBool3" "moved to heap: c$"
+	_ = h7.Sum()
+}
+
+type cloneableHashError interface {
+	hashIface
+	Clone() (hashIface, error)
+}
+
+type hash3 struct{ state [32]byte }
+
+func newHash3() hashIface { // ERROR "can inline newHash3$"
+	return &hash3{} // ERROR "&hash3{} escapes to heap$"
+}
+
+func (h *hash3) Sum() []byte { // ERROR "can inline \(\*hash3\).Sum$" "h does not escape$"
+	return make([]byte, 32) // ERROR "make\(\[\]byte, 32\) escapes to heap$"
+}
+
+func (h *hash3) Clone() (hashIface, error) { // ERROR "can inline \(\*hash3\).Clone$" "h does not escape$"
+	c := *h // ERROR "moved to heap: c$"
+	return &c, nil
+}
+
+func interleavedCloneableHashError() {
+	h1 := newHash3() // ERROR "&hash3{} does not escape$" "inlining call to newHash3"
+	_ = h1.Sum() // ERROR "devirtualizing h1.Sum to \*hash3$" "inlining call to \(\*hash3\).Sum" "make\(\[\]byte, 32\) does not escape$"
+
+	if h1, ok := h1.(cloneableHashError); ok {
+		h2, err := h1.Clone() // ERROR "devirtualizing h1.Clone to \*hash3$" "inlining call to \(\*hash3\).Clone"
+		if err == nil {
+			_ = h2.Sum() // ERROR "devirtualizing h2.Sum to \*hash3$" "inlining call to \(\*hash3\).Sum" "make\(\[\]byte, 32\) does not escape$"
+		}
+	}
+}
diff --git a/test/fixedbugs/bug409.out b/test/fixedbugs/bug409.out
index 3cb40ed59a9..93a48451c35 100644
--- a/test/fixedbugs/bug409.out
+++ b/test/fixedbugs/bug409.out
@@ -1 +1 @@
-+1.000000e+000 +2.000000e+000 +3.000000e+000 +4.000000e+000
+1 2 3 4
diff --git a/test/fixedbugs/issue15747.go b/test/fixedbugs/issue15747.go
index 92e762c4e92..743adb6a8ff 100644
--- a/test/fixedbugs/issue15747.go
+++ b/test/fixedbugs/issue15747.go
@@ -19,7 +19,7 @@ type T struct{ M string }
 
 var b bool
 
-func f1(q *Q, xx []byte) interface{} { // ERROR "live at call to newobject: xx$" "live at entry to f1: xx$"
+func f1(q *Q, xx []byte) interface{} { // ERROR "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: xx$" "live at entry to f1: xx$"
 	// xx was copied from the stack to the heap on the previous line:
 	// xx was live for the first two prints but then it switched to &xx
 	// being live. We should not see plain xx again.
@@ -36,7 +36,7 @@ func f1(q *Q, xx []byte) interface{} { // ERROR "live at call to newobject: xx$"
 //go:noinline
 func f2(d []byte, n int) (odata, res []byte, e interface{}) { // ERROR "live at entry to f2: d$"
 	if n > len(d) {
-		return d, nil, &T{M: "hello"} // ERROR "live at call to newobject: d"
+		return d, nil, &T{M: "hello"} // ERROR "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: d"
 	}
 	res = d[:n]
 	odata = d[n:]
diff --git a/test/fixedbugs/issue19658.go b/test/fixedbugs/issue19658.go
index 1e13573b72b..1ad575c269e 100644
--- a/test/fixedbugs/issue19658.go
+++ b/test/fixedbugs/issue19658.go
@@ -64,8 +64,8 @@ func main() {
 		{"uint64", "8", "panic: 8"},
 		{"uintptr", "8", "panic: 8"},
 		{"bool", "true", "panic: true"},
-		{"complex64", "8 + 16i", "panic: (+8.000000e+000+1.600000e+001i)"},
-		{"complex128", "8+16i", "panic: (+8.000000e+000+1.600000e+001i)"},
+		{"complex64", "8 + 16i", "panic: (8+16i)"},
+		{"complex128", "8+16i", "panic: (8+16i)"},
 		{"string", `"test"`, "panic: test"}} {
 
 		b := bytes.Buffer{}
diff --git a/test/fixedbugs/issue35576.out b/test/fixedbugs/issue35576.out
index 2aefe3edc5a..0d1b8f673eb 100644
--- a/test/fixedbugs/issue35576.out
+++ b/test/fixedbugs/issue35576.out
@@ -1 +1 @@
--42+4.200000e+001x-42 +4.200000e+001 x
+-4242x-42 42 x
diff --git a/test/fixedbugs/issue42284.dir/a.go b/test/fixedbugs/issue42284.dir/a.go
index ccf54fad54a..e55f190d7ee 100644
--- a/test/fixedbugs/issue42284.dir/a.go
+++ b/test/fixedbugs/issue42284.dir/a.go
@@ -22,9 +22,8 @@ func g() {
 	h := E() // ERROR "inlining call to E" "T\(0\) does not escape"
 	h.M() // ERROR "devirtualizing h.M to T" "inlining call to T.M"
 
-	// BAD: T(0) could be stack allocated.
-	i := F(T(0)) // ERROR "inlining call to F" "T\(0\) escapes to heap"
+	i := F(T(0)) // ERROR "inlining call to F" "T\(0\) does not escape"
 
-	// Testing that we do NOT devirtualize here:
-	i.M()
+	// It is fine that we devirtualize here, as we add an additional nilcheck.
+	i.M() // ERROR "devirtualizing i.M to T" "inlining call to T.M"
 }
diff --git a/test/fixedbugs/issue42284.dir/b.go b/test/fixedbugs/issue42284.dir/b.go
index 559de591844..4a0b7cea102 100644
--- a/test/fixedbugs/issue42284.dir/b.go
+++ b/test/fixedbugs/issue42284.dir/b.go
@@ -10,9 +10,8 @@ func g() {
 	h := a.E() // ERROR "inlining call to a.E" "T\(0\) does not escape"
 	h.M() // ERROR "devirtualizing h.M to a.T" "inlining call to a.T.M"
 
-	// BAD: T(0) could be stack allocated.
-	i := a.F(a.T(0)) // ERROR "inlining call to a.F" "a.T\(0\) escapes to heap"
+	i := a.F(a.T(0)) // ERROR "inlining call to a.F" "a.T\(0\) does not escape"
 
-	// Testing that we do NOT devirtualize here:
-	i.M()
+	// It is fine that we devirtualize here, as we add an additional nilcheck.
+	i.M() // ERROR "devirtualizing i.M to a.T" "inlining call to a.T.M"
 }
diff --git a/test/fixedbugs/issue6899.out b/test/fixedbugs/issue6899.out
index e2375f07766..ec064f61ba7 100644
--- a/test/fixedbugs/issue6899.out
+++ b/test/fixedbugs/issue6899.out
@@ -1 +1 @@
--0.000000e+000
+-0
diff --git a/test/fixedbugs/issue76008.go b/test/fixedbugs/issue76008.go
new file mode 100644
index 00000000000..bdf273bca1e
--- /dev/null
+++ b/test/fixedbugs/issue76008.go
@@ -0,0 +1,35 @@
+// run
+
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "runtime"
+
+func main() {
+	shouldPanic(func() {
+		g = any(func() {}) == any(func() {})
+	})
+	shouldPanic(func() {
+		g = any(map[int]int{}) == any(map[int]int{})
+	})
+	shouldPanic(func() {
+		g = any([]int{}) == any([]int{})
+	})
+}
+
+var g bool
+
+func shouldPanic(f func()) {
+	defer func() {
+		err := recover()
+		if err == nil {
+			_, _, line, _ := runtime.Caller(2)
+			println("did not panic at line", line+1)
+		}
+	}()
+
+	f()
+}
diff --git a/test/goprint.out b/test/goprint.out
index da3919ed64f..4f95aa1f881 100644
--- a/test/goprint.out
+++ b/test/goprint.out
@@ -1 +1 @@
-42 true false true +1.500000e+000 world 0x0 [0/0]0x0 0x0 0x0 255
+42 true false true 1.5 world 0x0 [0/0]0x0 0x0 0x0 255
diff --git a/test/heapsampling.go b/test/heapsampling.go
index 741db74f894..db93b253c90 100644
--- a/test/heapsampling.go
+++ b/test/heapsampling.go
@@ -48,22 +48,23 @@ func testInterleavedAllocations() error {
 	const iters = 50000
 	// Sizes of the allocations performed by each experiment.
 	frames := []string{"main.allocInterleaved1", "main.allocInterleaved2", "main.allocInterleaved3"}
+	leafFrame := "main.allocInterleaved"
 
 	// Pass if at least one of three experiments has no errors. Use a separate
 	// function for each experiment to identify each experiment in the profile.
 	allocInterleaved1(iters)
-	if checkAllocations(getMemProfileRecords(), frames[0:1], iters, allocInterleavedSizes) == nil {
+	if checkAllocations(getMemProfileRecords(), leafFrame, frames[0:1], iters, allocInterleavedSizes) == nil {
 		// Passed on first try, report no error.
 		return nil
 	}
 	allocInterleaved2(iters)
-	if checkAllocations(getMemProfileRecords(), frames[0:2], iters, allocInterleavedSizes) == nil {
+	if checkAllocations(getMemProfileRecords(), leafFrame, frames[0:2], iters, allocInterleavedSizes) == nil {
 		// Passed on second try, report no error.
 		return nil
 	}
 	allocInterleaved3(iters)
 	// If it fails a third time, we may be onto something.
-	return checkAllocations(getMemProfileRecords(), frames[0:3], iters, allocInterleavedSizes)
+	return checkAllocations(getMemProfileRecords(), leafFrame, frames[0:3], iters, allocInterleavedSizes)
 }
 
 var allocInterleavedSizes = []int64{17 * 1024, 1024, 18 * 1024, 512, 16 * 1024, 256}
@@ -108,22 +109,23 @@ func testSmallAllocations() error {
 	// Sizes of the allocations performed by each experiment.
 	sizes := []int64{1024, 512, 256}
 	frames := []string{"main.allocSmall1", "main.allocSmall2", "main.allocSmall3"}
+	leafFrame := "main.allocSmall"
 
 	// Pass if at least one of three experiments has no errors. Use a separate
 	// function for each experiment to identify each experiment in the profile.
 	allocSmall1(iters)
-	if checkAllocations(getMemProfileRecords(), frames[0:1], iters, sizes) == nil {
+	if checkAllocations(getMemProfileRecords(), leafFrame, frames[0:1], iters, sizes) == nil {
 		// Passed on first try, report no error.
 		return nil
 	}
 	allocSmall2(iters)
-	if checkAllocations(getMemProfileRecords(), frames[0:2], iters, sizes) == nil {
+	if checkAllocations(getMemProfileRecords(), leafFrame, frames[0:2], iters, sizes) == nil {
 		// Passed on second try, report no error.
 		return nil
 	}
 	allocSmall3(iters)
 	// If it fails a third time, we may be onto something.
-	return checkAllocations(getMemProfileRecords(), frames[0:3], iters, sizes)
+	return checkAllocations(getMemProfileRecords(), leafFrame, frames[0:3], iters, sizes)
 }
 
 // allocSmall performs only small allocations for sanity testing.
@@ -161,21 +163,21 @@ func allocSmall3(n int) {
 // Look only at samples that include the named frames, and group the
 // allocations by their line number. All these allocations are done from
 // the same leaf function, so their line numbers are the same.
-func checkAllocations(records []runtime.MemProfileRecord, frames []string, count int64, size []int64) error {
+func checkAllocations(records []runtime.MemProfileRecord, leafFrame string, frames []string, count int64, size []int64) error {
 	objectsPerLine := map[int][]int64{}
 	bytesPerLine := map[int][]int64{}
 	totalCount := []int64{}
 	// Compute the line number of the first allocation. All the
 	// allocations are from the same leaf, so pick the first one.
 	var firstLine int
-	for ln := range allocObjects(records, frames[0]) {
+	for ln := range allocObjects(records, leafFrame, frames[0]) {
 		if firstLine == 0 || firstLine > ln {
 			firstLine = ln
 		}
 	}
 	for _, frame := range frames {
 		var objectCount int64
-		a := allocObjects(records, frame)
+		a := allocObjects(records, leafFrame, frame)
 		for s := range size {
 			// Allocations of size size[s] should be on line firstLine + s.
 			ln := firstLine + s
@@ -258,7 +260,7 @@ type allocStat struct {
 // allocObjects examines the profile records for samples including the
 // named function and returns the allocation stats aggregated by
 // source line number of the allocation (at the leaf frame).
-func allocObjects(records []runtime.MemProfileRecord, function string) map[int]allocStat {
+func allocObjects(records []runtime.MemProfileRecord, leafFrame, function string) map[int]allocStat {
 	a := make(map[int]allocStat)
 	for _, r := range records {
 		var pcs []uintptr
@@ -273,7 +275,7 @@ func allocObjects(records []runtime.MemProfileRecord, function string) map[int]a
 		for {
 			frame, more := frames.Next()
 			name := frame.Function
-			if line == 0 {
+			if name == leafFrame && line == 0 {
 				line = frame.Line
 			}
 			if name == function {
diff --git a/test/ken/cplx0.out b/test/ken/cplx0.out
index 7627c28df7a..03dfb1e5093 100644
--- a/test/ken/cplx0.out
+++ b/test/ken/cplx0.out
@@ -1,4 +1,4 @@
-(+5.000000e+000+6.000000e+000i)
-(+5.000000e+000+6.000000e+000i)
-(+5.000000e+000+6.000000e+000i)
-(+5.000000e+000+6.000000e+000i)
+(5+6i)
+(5+6i)
+(5+6i)
+(5+6i)
diff --git a/test/live.go b/test/live.go
index 6e1a60557c8..56b78ccf8b4 100644
--- a/test/live.go
+++ b/test/live.go
@@ -467,9 +467,9 @@ func f27defer(b bool) {
 
 func f27go(b bool) {
 	x := 0
 	if b {
-		go call27(func() { x++ }) // ERROR "live at call to newobject: &x$" "live at call to newobject: &x .autotmp_[0-9]+$" "live at call to newproc: &x$" // allocate two closures, the func literal, and the wrapper for go
+		go call27(func() { x++ }) // ERROR "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: &x$" "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: &x .autotmp_[0-9]+$" "live at call to newproc: &x$" // allocate two closures, the func literal, and the wrapper for go
 	}
-	go call27(func() { x++ }) // ERROR "live at call to newobject: &x$" "live at call to newobject: .autotmp_[0-9]+$" // allocate two closures, the func literal, and the wrapper for go
+	go call27(func() { x++ }) // ERROR "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: &x$" "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: .autotmp_[0-9]+$" // allocate two closures, the func literal, and the wrapper for go
 	printnl()
 }
@@ -538,7 +538,7 @@ func f31(b1, b2, b3 bool) {
 		g31(g18()) // ERROR "stack object .autotmp_[0-9]+ \[2\]string$"
 	}
 	if b2 {
-		h31(g18()) // ERROR "live at call to convT: .autotmp_[0-9]+$" "live at call to newobject: .autotmp_[0-9]+$"
+		h31(g18()) // ERROR "live at call to convT: .autotmp_[0-9]+$" "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: .autotmp_[0-9]+$"
 	}
 	if b3 {
 		panic(g18())
@@ -665,14 +665,14 @@ func f39a() (x []int) {
 
 func f39b() (x [10]*int) {
 	x = [10]*int{}
-	x[0] = new(int) // ERROR "live at call to newobject: x$"
+	x[0] = new(int) // ERROR "live at call to mallocTiny[48]: x$"
 	printnl() // ERROR "live at call to printnl: x$"
 	return x
 }
 
 func f39c() (x [10]*int) {
 	x = [10]*int{}
-	x[0] = new(int) // ERROR "live at call to newobject: x$"
+	x[0] = new(int) // ERROR "live at call to mallocTiny[48]: x$"
 	printnl() // ERROR "live at call to printnl: x$"
 	return
 }
diff --git a/test/live_regabi.go b/test/live_regabi.go
index d2565ba9130..838cbdefad7 100644
--- a/test/live_regabi.go
+++ b/test/live_regabi.go
@@ -465,9 +465,9 @@ func f27defer(b bool) {
 
 func f27go(b bool) {
 	x := 0
 	if b {
-		go call27(func() { x++ }) // ERROR "live at call to newobject: &x$" "live at call to newobject: &x .autotmp_[0-9]+$" "live at call to newproc: &x$" // allocate two closures, the func literal, and the wrapper for go
+		go call27(func() { x++ }) // ERROR "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: &x$" "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: &x .autotmp_[0-9]+$" "live at call to newproc: &x$" // allocate two closures, the func literal, and the wrapper for go
 	}
-	go call27(func() { x++ }) // ERROR "live at call to newobject: &x$" "live at call to newobject: .autotmp_[0-9]+$" // allocate two closures, the func literal, and the wrapper for go
+	go call27(func() { x++ }) // ERROR "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: &x$" "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: .autotmp_[0-9]+$" // allocate two closures, the func literal, and the wrapper for go
 	printnl()
 }
@@ -536,7 +536,7 @@ func f31(b1, b2, b3 bool) {
 		g31(g18()) // ERROR "stack object .autotmp_[0-9]+ \[2\]string$"
 	}
 	if b2 {
-		h31(g18()) // ERROR "live at call to convT: .autotmp_[0-9]+$" "live at call to newobject: .autotmp_[0-9]+$"
+		h31(g18()) // ERROR "live at call to convT: .autotmp_[0-9]+$" "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: .autotmp_[0-9]+$"
 	}
 	if b3 {
 		panic(g18())
@@ -663,14 +663,14 @@ func f39a() (x []int) {
 
 func f39b() (x [10]*int) {
 	x = [10]*int{}
-	x[0] = new(int) // ERROR "live at call to newobject: x$"
+	x[0] = new(int) // ERROR "live at call to mallocTiny[48]: x$"
 	printnl() // ERROR "live at call to printnl: x$"
 	return x
 }
 
 func f39c() (x [10]*int) {
 	x = [10]*int{}
-	x[0] = new(int) // ERROR "live at call to newobject: x$"
+	x[0] = new(int) // ERROR "live at call to mallocTiny[48]: x$"
 	printnl() // ERROR "live at call to printnl: x$"
 	return
 }
diff --git a/test/print.out b/test/print.out
index 85376af0c78..e19a8617f47 100644
--- a/test/print.out
+++ b/test/print.out
@@ -9,8 +9,8 @@
 7
 7
 7
-+8.000000e+000
-(+9.000000e+000+1.000000e+001i)
+8
+(9+10i)
 true
 false
 hello
@@ -19,8 +19,8 @@
 one two
 hello
 false true
-(+1.400000e+001+1.500000e+001i)
-+1.300000e+001
+(14+15i)
+13
 12
 12
 12
diff --git a/test/prove.go b/test/prove.go
index bcc023dfec6..365e8ba006e 100644
--- a/test/prove.go
+++ b/test/prove.go
@@ -1,6 +1,6 @@
 // errorcheck -0 -d=ssa/prove/debug=1
 
-//go:build amd64
+//go:build amd64 || arm64
 
 // Copyright 2016 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
@@ -971,40 +971,6 @@ func negIndex2(n int) {
 	useSlice(c)
 }
 
-// Check that prove is zeroing these right shifts of positive ints by bit-width - 1.
-// e.g (Rsh64x64 n (Const64 [63])) && ft.isNonNegative(n) -> 0
-func sh64(n int64) int64 {
-	if n < 0 {
-		return n
-	}
-	return n >> 63 // ERROR "Proved Rsh64x64 shifts to zero"
-}
-
-func sh32(n int32) int32 {
-	if n < 0 {
-		return n
-	}
-	return n >> 31 // ERROR "Proved Rsh32x64 shifts to zero"
-}
-
-func sh32x64(n int32) int32 {
-	if n < 0 {
-		return n
-	}
-	return n >> uint64(31) // ERROR "Proved Rsh32x64 shifts to zero"
-}
-
-func sh16(n int16) int16 {
-	if n < 0 {
-		return n
-	}
-	return n >> 15 // ERROR "Proved Rsh16x64 shifts to zero"
-}
-
-func sh64noopt(n int64) int64 {
-	return n >> 63 // not optimized; n could be negative
-}
-
 // These cases are division of a positive signed integer by a power of 2.
 // The opt pass doesnt have sufficient information to see that n is positive.
 // So, instead, opt rewrites the division with a less-than-optimal replacement.
@@ -1018,21 +984,21 @@ func divShiftClean(n int) int {
 	if n < 0 {
 		return n
 	}
-	return n / int(8) // ERROR "Proved Rsh64x64 shifts to zero"
+	return n / int(8) // ERROR "Proved Div64 is unsigned$"
 }
 
 func divShiftClean64(n int64) int64 {
 	if n < 0 {
 		return n
 	}
-	return n / int64(16) // ERROR "Proved Rsh64x64 shifts to zero"
+	return n / int64(16) // ERROR "Proved Div64 is unsigned$"
 }
 
 func divShiftClean32(n int32) int32 {
 	if n < 0 {
 		return n
 	}
-	return n / int32(16) // ERROR "Proved Rsh32x64 shifts to zero"
+	return n / int32(16) // ERROR "Proved Div32 is unsigned$"
 }
 
 // Bounds check elimination
@@ -1078,6 +1044,23 @@ func divu(x, y uint) int {
 	return 0
 }
 
+func divuRoundUp(x, y, z uint) int {
+	x &= ^uint(0) >> 8 // can't overflow in add
+	y = min(y, 0xff-1)
+	z = max(z, 0xff)
+	r := (x + y) / z // ERROR "Proved Neq64$"
+	if r <= x { // ERROR "Proved Leq64U$"
+		return 1
+	}
+	return 0
+}
+
+func divuRoundUpSlice(x []string) {
+	halfRoundedUp := uint(len(x)+1) / 2
+	_ = x[:halfRoundedUp] // ERROR "Proved IsSliceInBounds$"
+	_ = x[halfRoundedUp:] // ERROR "Proved IsSliceInBounds$"
+}
+
 func modu1(x, y uint) int {
 	z := x % y
 	if z < y { // ERROR "Proved Less64U$"
@@ -1095,7 +1078,7 @@ func modu2(x, y uint) int {
 }
 
 func issue57077(s []int) (left, right []int) {
-	middle := len(s) / 2
+	middle := len(s) / 2 // ERROR "Proved Div64 is unsigned$"
 	left = s[:middle] // ERROR "Proved IsSliceInBounds$"
 	right = s[middle:] // ERROR "Proved IsSliceInBounds$"
 	return
@@ -1484,7 +1467,7 @@ func mod64sPositiveWithSmallerDividendMax(a, b int64, ensureBothBranchesCouldHap
 	a = min(a, 0xff)
 	b = min(b, 0xfff)
 
-	z := a % b // ERROR "Proved Mod64 does not need fix-up$"
+	z := a % b // ERROR "Proved Mod64 is unsigned$"
 
 	if ensureBothBranchesCouldHappen {
 		if z > 0xff { // ERROR "Disproved Less64$"
@@ -1504,7 +1487,7 @@ func mod64sPositiveWithSmallerDivisorMax(a, b int64, ensureBothBranchesCouldHapp
 	a = min(a, 0xfff)
 	b = min(b, 0xff)
 
-	z := a % b // ERROR "Proved Mod64 does not need fix-up$"
+	z := a % b // ERROR "Proved Mod64 is unsigned$"
 
 	if ensureBothBranchesCouldHappen {
 		if z > 0xff-1 { // ERROR "Disproved Less64$"
@@ -1524,7 +1507,7 @@ func mod64sPositiveWithIdenticalMax(a, b int64, ensureBothBranchesCouldHappen bo
 	a = min(a, 0xfff)
 	b = min(b, 0xfff)
 
-	z := a % b // ERROR "Proved Mod64 does not need fix-up$"
+	z := a % b // ERROR "Proved Mod64 is unsigned$"
 
 	if ensureBothBranchesCouldHappen {
 		if z > 0xfff-1 { // ERROR "Disproved Less64$"
@@ -1569,7 +1552,7 @@ func div64s(a, b int64, ensureAllBranchesCouldHappen func() bool) int64 {
 	b = min(b, 0xff)
 	b = max(b, 0xf)
 
-	z := a / b // ERROR "(Proved Div64 does not need fix-up|Proved Neq64)$"
+	z := a / b // ERROR "Proved Div64 is unsigned|Proved Neq64"
 
 	if ensureAllBranchesCouldHappen() && z > 0xffff/0xf { // ERROR "Disproved Less64$"
 		return 42
@@ -2342,6 +2325,328 @@ func setCapMaxBasedOnElementSize(x []uint64) int {
 	return 0
 }
 
+func issue75144for(a, b []uint64) bool {
+	if len(a) == len(b) {
+		for len(a) > 4 {
+			a = a[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+			b = b[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+		}
+		if len(a) == len(b) { // ERROR "Proved Eq64$"
+			return true
+		}
+	}
+	return false
+}
+
+func issue75144if(a, b []uint64) bool {
+	if len(a) == len(b) {
+		if len(a) > 4 {
+			a = a[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+			b = b[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+		}
+		if len(a) == len(b) { // ERROR "Proved Eq64$"
+			return true
+		}
+	}
+	return false
+}
+
+func issue75144if2(a, b, c, d []uint64) (r bool) {
+	if len(a) != len(b) || len(c) != len(d) {
+		return
+	}
+	if len(a) <= 4 || len(c) <= 4 {
+		return
+	}
+	if len(a) < len(c) {
+		c = c[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+		d = d[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+	} else {
+		a = a[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+		b = b[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+	}
+	if len(a) == len(c) {
+		return
+	}
+	if len(a) == len(b) { // ERROR "Proved Eq64$"
+		r = true
+	}
+	if len(c) == len(d) { // ERROR "Proved Eq64$"
+		r = true
+	}
+	return
+}
+
+func issue75144forCannot(a, b []uint64) bool {
+	if len(a) == len(b) {
+		for len(a) > 4 {
+			a = a[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+			b = b[4:]
+			for len(a) > 2 {
+				a = a[2:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+				b = b[2:]
+			}
+		}
+		if len(a) == len(b) {
+			return true
+		}
+	}
+	return false
+}
+
+func issue75144ifCannot(a, b []uint64) bool {
+	if len(a) == len(b) {
+		if len(a) > 4 {
+			a = a[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+			b = b[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+			if len(a) > 2 {
+				a = a[2:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+				b = b[2:]
+			}
+		}
+		if len(a) == len(b) {
+			return true
+		}
+	}
+	return false
+}
+
+func issue75144ifCannot2(a, b []uint64) bool {
+	if len(a) == len(b) {
+		if len(a) > 4 {
+			a = a[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+			b = b[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+		} else if len(a) > 2 {
+			a = a[2:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+			b = b[2:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+		}
+		if len(a) == len(b) {
+			return true
+		}
+	}
+	return false
+}
+
+func issue75144forNot(a, b []uint64) bool {
+	if len(a) == len(b) {
+		for len(a) > 4 {
+			a = a[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+			b = b[3:]
+		}
+		if len(a) == len(b) {
+			return true
+		}
+	}
+	return false
+}
+
+func issue75144forNot2(a, b, c []uint64) bool {
+	if len(a) == len(b) {
+		for len(a) > 4 {
+			a = a[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+			b = c[4:]
+		}
+		if len(a) == len(b) {
+			return true
+		}
+	}
+	return false
+}
+
+func issue75144ifNot(a, b []uint64) bool {
+	if len(a) == len(b) {
+		if len(a) > 4 {
+			a = a[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+		} else {
+			b = b[4:]
+		}
+		if len(a) == len(b) {
+			return true
+		}
+	}
+	return false
+}
+
+func mulIntoAnd(a, b uint) uint {
+	if a > 1 || b > 1 {
+		return 0
+	}
+	return a * b // ERROR "Rewrote Mul v[0-9]+ into And$"
+}
+
+func mulIntoCondSelect(a, b uint) uint {
+	if a > 1 {
+		return 0
+	}
+	return a * b // ERROR "Rewrote Mul v[0-9]+ into CondSelect"
+}
+
+func div7pos(x int32) bool {
+	if x > 0 {
+		return x%7 == 0 // ERROR "Proved Div32 is unsigned"
+	}
+	return false
+}
+
+func div2pos(x []int) int {
+	return len(x) / 2 // ERROR "Proved Div64 is unsigned"
+}
+
+func div3pos(x []int) int {
+	return len(x) / 3 // ERROR "Proved Div64 is unsigned"
+}
+
+
+var len200 [200]int
+
+func modbound1(u uint64) int {
+	s := 0
+	for u > 0 {
+		var d uint64
+		u, d = u/100, u%100
+		s += len200[d*2+1] // ERROR "Proved IsInBounds"
+	}
+	return s
+}
+
+func modbound2(p *[10]int, x uint) int {
+	return p[x%9+1] // ERROR "Proved IsInBounds"
+}
+
+func shiftbound(x int) int {
+	return 1 << (x % 11) // ERROR "Proved Lsh(32x32|64x64) bounded" "Proved Div64 does not need fix-up"
+}
+
+func shiftbound2(x int) int {
+	return 1 << (x % 8) // ERROR "Proved Lsh(32x32|64x64) bounded" "Proved Div64 does not need fix-up"
+}
+
+func rangebound1(x []int) int {
+	s := 0
+	for i := range 1000 { // ERROR "Induction variable"
+		if i < len(x) {
+			s += x[i] // ERROR "Proved IsInBounds"
+		}
+	}
+	return s
+}
+
+func rangebound2(x []int) int {
+	s := 0
+	if len(x) > 0 {
+		for i := range 1000 { // ERROR "Induction variable"
+			s += x[i%len(x)] // ERROR "Proved Mod64 is unsigned" "Proved Neq64" "Proved IsInBounds"
+		}
+	}
+	return s
+}
+
+func swapbound(v []int) {
+	for i := 0; i < len(v)/2; i++ { // ERROR "Proved Div64 is unsigned|Induction variable"
+		v[i], // ERROR "Proved IsInBounds"
+			v[len(v)-1-i] = // ERROR "Proved IsInBounds"
+			v[len(v)-1-i],
+			v[i] // ERROR "Proved IsInBounds"
+	}
+}
+
+func rightshift(v *[256]int) int {
+	for i := range 1024 { // ERROR "Induction"
+		if v[i/32] == 0 { // ERROR "Proved Div64 is unsigned" "Proved IsInBounds"
+			return i
+		}
+	}
+	for i := range 1024 { // ERROR "Induction"
+		if v[i>>2] == 0 { // ERROR "Proved IsInBounds"
+			return i
+		}
+	}
+	return -1
+}
+
+func rightShiftBounds(v, s int) {
+	// The ignored "Proved" messages on the shift itself are about whether s >= 0 or s < 32 or 64.
+	// We care about the bounds for x printed on the prove(x) lines.
+
+	if -8 <= v && v <= -2 && 1 <= s && s <= 3 {
+		x := v >> s // ERROR "Proved"
+		prove(x) // ERROR "Proved sm,SM=-4,-1 "
+	}
+	if -80 <= v && v <= -20 && 1 <= s && s <= 3 {
+		x := v >> s // ERROR "Proved"
+		prove(x) // ERROR "Proved sm,SM=-40,-3 "
+	}
+	if -8 <= v && v <= 10 && 1 <= s && s <= 3 {
+		x := v >> s // ERROR "Proved"
+		prove(x) // ERROR "Proved sm,SM=-4,5 "
+	}
+	if 2 <= v && v <= 10 && 1 <= s && s <= 3 {
+		x := v >> s // ERROR "Proved"
+		prove(x) // ERROR "Proved sm,SM=0,5 "
+	}
+
+	if -8 <= v && v <= -2 && 0 <= s && s <= 3 {
+		x := v >> s // ERROR "Proved"
+		prove(x) // ERROR "Proved sm,SM=-8,-1 "
+	}
+	if -80 <= v && v <= -20 && 0 <= s && s <= 3 {
+		x := v >> s // ERROR "Proved"
+		prove(x) // ERROR "Proved sm,SM=-80,-3 "
+	}
+	if -8 <= v && v <= 10 && 0 <= s && s <= 3 {
+		x := v >> s // ERROR "Proved"
+		prove(x) // ERROR "Proved sm,SM=-8,10 "
+	}
+	if 2 <= v && v <= 10 && 0 <= s && s <= 3 {
+		x := v >> s // ERROR "Proved"
+		prove(x) // ERROR "Proved sm,SM=0,10 "
+	}
+
+	if -8 <= v && v <= -2 && -1 <= s && s <= 3 {
+		x := v >> s // ERROR "Proved"
+		prove(x) // ERROR "Proved sm,SM=-8,-1 "
+	}
+	if -80 <= v && v <= -20 && -1 <= s && s <= 3 {
+		x := v >> s // ERROR "Proved"
+		prove(x) // ERROR "Proved sm,SM=-80,-3 "
+	}
+	if -8 <= v && v <= 10 && -1 <= s && s <= 3 {
+		x := v >> s // ERROR "Proved"
+		prove(x) // ERROR "Proved sm,SM=-8,10 "
+	}
+	if 2 <= v && v <= 10 && -1 <= s && s <= 3 {
+		x := v >> s // ERROR "Proved"
+		prove(x) // ERROR "Proved sm,SM=0,10 "
+	}
+}
+
+func unsignedRightShiftBounds(v uint, s int) {
+	if 2 <= v && v <= 10 && -1 <= s && s <= 3 {
+		x := v >> s // ERROR "Proved"
+		proveu(x) // ERROR "Proved sm,SM=0,10 "
+	}
+	if 2 <= v && v <= 10 && 0 <= s && s <= 3 {
+		x := v >> s // ERROR "Proved"
+		proveu(x) // ERROR "Proved sm,SM=0,10 "
+	}
+	if 2 <= v && v <= 10 && 1 <= s && s <= 3 {
+		x := v >> s // ERROR "Proved"
+		proveu(x) // ERROR "Proved sm,SM=0,5 "
+	}
+	if 20 <= v && v <= 100 && 1 <= s && s <= 3 {
+		x := v >> s // ERROR "Proved"
+		proveu(x) // ERROR "Proved sm,SM=2,50 "
+	}
+}
+
+//go:noinline
+func prove(x int) {
+}
+
+//go:noinline
+func proveu(x uint) {
+}
+
 //go:noinline
 func useInt(a int) {
 }
diff --git a/test/prove_constant_folding.go b/test/prove_constant_folding.go
index 366c446b830..46764f9b9d9 100644
--- a/test/prove_constant_folding.go
+++ b/test/prove_constant_folding.go
@@ -1,6 +1,6 @@
 // errorcheck -0 -d=ssa/prove/debug=2
 
-//go:build amd64
+//go:build amd64 || arm64
 
 // Copyright 2022 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
@@ -17,17 +17,65 @@ func f0i(x int) int {
 		return x + 5 // ERROR "Proved.+is constant 0$" "Proved.+is constant 5$" "x\+d >=? w"
 	}
 
-	return x / 2
+	return x + 1
 }
 
-func f0u(x uint) uint {
+func f0u(x uint) int {
 	if x == 20 {
-		return x // ERROR "Proved.+is constant 20$"
+		return int(x) // ERROR "Proved.+is constant 20$"
 	}
 
 	if (x + 20) == 20 {
-		return x + 5 // ERROR "Proved.+is constant 0$" "Proved.+is constant 5$" "x\+d >=? w"
+		return int(x + 5) // ERROR "Proved.+is constant 0$" "Proved.+is constant 5$" "x\+d >=? w"
 	}
 
-	return x / 2
+	if x < 1000 {
+		return int(x) >> 31 // ERROR "Proved.+is constant 0$"
+	}
+	if x := int32(x); x < -1000 {
+		return int(x >> 31) // ERROR "Proved.+is constant -1$"
+	}
+
+	return int(x) + 1
+}
+
+// Check that prove is zeroing these right shifts of positive ints by bit-width - 1.
+// e.g. (Rsh64x64 n (Const64 [63])) && ft.isNonNegative(n) -> 0
+func sh64(n int64) int64 {
+	if n < 0 {
+		return n
+	}
+	return n >> 63 // ERROR "Proved .+ is constant 0$"
+}
+
+func sh32(n int32) int32 {
+	if n < 0 {
+		return n
+	}
+	return n >> 31 // ERROR "Proved .+ is constant 0$"
+}
+
+func sh32x64(n int32) int32 {
+	if n < 0 {
+		return n
+	}
+	return n >> uint64(31) // ERROR "Proved .+ is constant 0$"
+}
+
+func sh32x64n(n int32) int32 {
+	if n >= 0 {
+		return 0
+	}
+	return n >> 31 // ERROR "Proved .+ is constant -1$"
+}
+
+func sh16(n int16) int16 {
+	if n < 0 {
+		return n
+	}
+	return n >> 15 // ERROR "Proved .+ is constant 0$"
+}
+
+func sh64noopt(n int64) int64 {
+	return n >> 63 // not optimized; n could be negative
+}
diff --git a/test/prove_invert_loop_with_unused_iterators.go b/test/prove_invert_loop_with_unused_iterators.go
index c66f20b6e93..6feef1d41b3 100644
--- a/test/prove_invert_loop_with_unused_iterators.go
+++ b/test/prove_invert_loop_with_unused_iterators.go
@@ -1,6 +1,6 @@
 // errorcheck -0 -d=ssa/prove/debug=1
 
-//go:build amd64
+//go:build amd64 || arm64
 
 package main
diff --git a/test/typeparam/issue46461.go b/test/typeparam/issue46461.go
index 363a87cfe08..7e35106c15b 100644
--- a/test/typeparam/issue46461.go
+++ b/test/typeparam/issue46461.go
@@ -1,4 +1,4 @@
-// errorcheck
+// compile
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
@@ -6,7 +6,7 @@
 
 package p
 
-type T[U interface{ M() T[U] }] int // ERROR "invalid recursive type: T refers to itself"
+type T[U interface{ M() T[U] }] int
 
 type X int
diff --git a/test/typeparam/issue46461b.dir/a.go b/test/typeparam/issue46461b.dir/a.go
index fcb414266d7..0d53b3e2042 100644
--- a/test/typeparam/issue46461b.dir/a.go
+++ b/test/typeparam/issue46461b.dir/a.go
@@ -4,4 +4,4 @@
 
 package a
 
-type T[U interface{ M() int }] int
+type T[U interface{ M() T[U] }] int
diff --git a/test/typeparam/issue46461b.dir/b.go b/test/typeparam/issue46461b.dir/b.go
index a4583257ffd..3393a375c20 100644
--- a/test/typeparam/issue46461b.dir/b.go
+++ b/test/typeparam/issue46461b.dir/b.go
@@ -8,6 +8,4 @@ import "./a"
 
 type X int
 
-func (X) M() int { return 0 }
-
-type _ a.T[X]
+func (X) M() a.T[X] { return 0 }
diff --git a/test/typeparam/issue48280.dir/a.go b/test/typeparam/issue48280.dir/a.go
index f66fd30e34e..17859e6aa90 100644
--- a/test/typeparam/issue48280.dir/a.go
+++ b/test/typeparam/issue48280.dir/a.go
@@ -4,7 +4,7 @@
 
 package a
 
-type I[T any] interface {
+type I[T I[T]] interface {
 	F() T
 }
diff --git a/test/typeparam/issue48306.dir/a.go b/test/typeparam/issue48306.dir/a.go
index fdfd86cb6d4..739750b20b3 100644
--- a/test/typeparam/issue48306.dir/a.go
+++ b/test/typeparam/issue48306.dir/a.go
@@ -4,6 +4,6 @@
 
 package a
 
-type I[T any] interface {
+type I[T I[T]] interface {
 	F() T
 }
diff --git a/test/typeparam/typeswitch5.out b/test/typeparam/typeswitch5.out
index 6b4cb4416f4..64aa940516e 100644
--- a/test/typeparam/typeswitch5.out
+++ b/test/typeparam/typeswitch5.out
@@ -1,4 +1,4 @@
 fooer 6
 other
 other
-fooer +9.000000e+000
+fooer 9
diff --git a/test/uintptrescapes2.go b/test/uintptrescapes2.go
index 656286c0ff2..e111d47fab9 100644
--- a/test/uintptrescapes2.go
+++ b/test/uintptrescapes2.go
@@ -33,8 +33,8 @@ func (T) M1(a uintptr) {} // ERROR "escaping uintptr"
 func (T) M2(a ...uintptr) {} // ERROR "escaping ...uintptr"
 
 func TestF1() {
-	var t int                       // ERROR "moved to heap"
-	F1(uintptr(unsafe.Pointer(&t))) // ERROR "live at call to F1: .?autotmp" "stack object .autotmp_[0-9]+ unsafe.Pointer$"
+	var t int // ERROR "moved to heap"
+	F1(uintptr(unsafe.Pointer(&t))) // ERROR "live at call to F1: .?autotmp" "stack object .autotmp_[0-9]+ unsafe.Pointer$"
 }
 
 func TestF3() {
@@ -49,17 +49,17 @@ func TestM1() {
 }
 
 func TestF2() {
-	var v int                                // ERROR "moved to heap"
-	F2(0, 1, uintptr(unsafe.Pointer(&v)), 2) // ERROR "live at call to newobject: .?autotmp" "live at call to F2: .?autotmp" "escapes to heap" "stack object .autotmp_[0-9]+ unsafe.Pointer$"
+	var v int // ERROR "moved to heap"
+	F2(0, 1, uintptr(unsafe.Pointer(&v)), 2) // ERROR "live at call to mallocgcSmallNoScanSC[0-9]+: .?autotmp" "live at call to F2: .?autotmp" "escapes to heap" "stack object .autotmp_[0-9]+ unsafe.Pointer$"
 }
 
 func TestF4() {
 	var v2 int // ERROR "moved to heap"
-	F4(0, 1, uintptr(unsafe.Pointer(&v2)), 2) // ERROR "live at call to newobject: .?autotmp" "live at call to F4: .?autotmp" "escapes to heap" "stack object .autotmp_[0-9]+ unsafe.Pointer$"
+	F4(0, 1, uintptr(unsafe.Pointer(&v2)), 2) // ERROR "live at call to mallocgcSmallNoScanSC[0-9]+: .?autotmp" "live at call to F4: .?autotmp" "escapes to heap" "stack object .autotmp_[0-9]+ unsafe.Pointer$"
 }
 
 func TestM2() {
 	var t T
 	var v int // ERROR "moved to heap"
-	t.M2(0, 1, uintptr(unsafe.Pointer(&v)), 2) // ERROR "live at call to newobject: .?autotmp" "live at call to T.M2: .?autotmp" "escapes to heap" "stack object .autotmp_[0-9]+ unsafe.Pointer$"
+	t.M2(0, 1, uintptr(unsafe.Pointer(&v)), 2) // ERROR "live at call to mallocgcSmallNoScanSC[0-9]+: .?autotmp" "live at call to T.M2: .?autotmp" "escapes to heap" "stack object .autotmp_[0-9]+ unsafe.Pointer$"
 }
diff --git a/test/wasmmemsize.dir/main.go b/test/wasmmemsize.dir/main.go
index e3aa5b5e921..c51e6b3b047 100644
--- a/test/wasmmemsize.dir/main.go
+++ b/test/wasmmemsize.dir/main.go
@@ -9,17 +9,19 @@ import (
 	"io"
 )
 
-// Expect less than 3 MB of memory usage for a small wasm program.
-// This reflects the current allocator. If the allocator changes,
-// update this value.
-const want = 3 << 20
+// Wasm page size.
+const pageSize = 64 * 1024
+
+// Expect less than 3 MB + 1 page of memory usage for a small wasm
+// program. This reflects the current allocator. If the allocator
+// changes, update this value.
+const want = 3<<20 + pageSize
 
 var w = io.Discard
 
 func main() {
 	fmt.Fprintln(w, "hello world")
 
-	const pageSize = 64 * 1024
 	sz := uintptr(currentMemory()) * pageSize
 	if sz > want {
 		fmt.Printf("FAIL: unexpected memory size %d, want <= %d\n", sz, want)