[dev.simd] all: merge master (cf5e993) into dev.simd

Merge List:

+ 2025-09-11 cf5e993177 cmd/link: allow one to specify the data section in the internal linker
+ 2025-09-11 cdb3d467fa encoding/gob: make use of reflect.TypeAssert
+ 2025-09-11 fef360964c archive/tar: fix typo in benchmark name
+ 2025-09-11 7d562b8460 syscall: actually remove unreachable code
+ 2025-09-11 c349582344 crypto/rsa: don't test CL 687836 against v1.0.0 FIPS 140-3 module
+ 2025-09-11 253dd08f5d debug/macho: filter non-external symbols when reading imported symbols without LC_DYSYMTAB
+ 2025-09-10 2009e6c596 internal/runtime/maps: remove redundant package docs
+ 2025-09-10 de5d7eccb9 runtime/internal/maps: only conditionally clear groups when sparse
+ 2025-09-10 8098b99547 internal/runtime/maps: speed up Clear
+ 2025-09-10 fe5420b054 cmd: delete some more windows/arm remnants
+ 2025-09-10 fad1dc608d runtime: don't artificially limit TestReadMetricsSched
+ 2025-09-10 b1f3e38e41 cmd/compile: when CSEing two values, prefer the statement marked one
+ 2025-09-10 00824f5ff5 types2: better documentation for resolve()
+ 2025-09-10 5cf8ca42e3 internal/trace/raw: use strings.Cut instead of strings.SplitN 2
+ 2025-09-10 80a2aae922 Revert "cmd/compile: improve stp merging for non-sequent cases"
+ 2025-09-10 f327a05419 go/token, syscall: annotate if blocks that defeat vet's unreachable pass
+ 2025-09-10 9650c97d0f syscall: remove unreachable code
+ 2025-09-10 f1c4b860d4 Revert "crypto/internal/fips140: update frozen module version to "v1.0.0""
+ 2025-09-10 30686c4cc8 encoding/json/v2: document context annotation with SemanticError
+ 2025-09-09 c5737dc21b runtime: when using cgo on 386, call C sigaction function
+ 2025-09-09 b9a4a09b0f runtime: remove duff support for riscv64
+ 2025-09-09 4dac9e093f cmd/compile: use generated loops instead of DUFFCOPY on riscv64
+ 2025-09-09 879ff736d3 cmd/compile: use generated loops instead of DUFFZERO on riscv64
+ 2025-09-09 77643dc63f cmd/compile: simplify zerorange on riscv64
+ 2025-09-09 e6605a1bcc encoding/json: use reflect.TypeAssert
+ 2025-09-09 4c20f7f15a cmd/cgo: run gcc to get errors and debug info in parallel
+ 2025-09-09 5dcedd6550 runtime: lock mheap_.speciallock when allocating synctest specials
+ 2025-09-09 d3be949ada runtime: don't negate eventfd errno
+ 2025-09-09 836fa74518 syscall: optimise cgo clearenv
+ 2025-09-09 ce39174482 crypto/rsa: check PrivateKey.D for consistency with Dp and Dq
+ 2025-09-09 5d9d0513dc crypto/rsa: check for post-Precompute changes in Validate
+ 2025-09-09 968a5107a9 crypto/internal/fips140: update frozen module version to "v1.0.0"
+ 2025-09-09 645ee44492 crypto/ecdsa: deprecate direct use of big.Int fields in keys
+ 2025-09-09 a67977da5e cmd/compile/internal/inline: ignore superfluous slicing
+ 2025-09-09 a5fa5ea51c cmd/compile/internal/ssa: expand runtime.memequal for length {3,5,6,7}
+ 2025-09-09 4c63d798cb cmd/compile: improve stp merging for non-sequent cases
+ 2025-09-09 bdd51e7855 cmd/compile: use constant zero register instead of specialized zero instructions on mips64x
+ 2025-09-09 10ac80de77 cmd/compile: introduce CCMP generation
+ 2025-09-09 3b3b16957c Revert "cmd/go: use os.Rename to move files on Windows"
+ 2025-09-09 e3223518b8 cmd/go: split generating cover files into its own action
+ 2025-09-09 af03343f93 cmd/compile: fix bounds check report
+ 2025-09-08 6447ff409a cmd/compile: fold constant in ADDshift op on loong64
+ 2025-09-08 5b218461f9 cmd/compile: optimize loads from abi.Type.{Size_,PtrBytes,Kind_}
+ 2025-09-08 b915e14490 cmd/compile: consolidate logic for rewriting fixed loads
+ 2025-09-08 06e791c0cd cmd/compile: simplify zerorange on mips
+ 2025-09-08 cf42b785b7 cmd/cgo: run recordTypes for each of the debugs at the end of Translate
+ 2025-09-08 5e6296f3f8 archive/tar: optimize nanosecond parsing in parsePAXTime
+ 2025-09-08 ea00650784 debug/pe: permit symbols with no name
+ 2025-09-08 4cc7cc74c3 crypto: update Hash comments to point to crypto/sha3
+ 2025-09-08 ff45d5d53c encoding/json/internal/jsonflags: fix comment with wrong field name
+ 2025-09-06 861c90c907 net/http: pool transport gzip readers
+ 2025-09-06 57769b5532 os: reject OpenDir of a non-directory file in Plan 9
+ 2025-09-06 a6144613d3 crypto/tls: use context.AfterFunc in handshakeContext
+ 2025-09-05 e8126bce9e runtime/cgo: save and restore R31 for crosscall1 on loong64
+ 2025-09-05 d767064170 cmd/compile: mark abi.PtrType.Elem sym as used
+ 2025-09-05 0b1eed09a3 vendor/golang.org/x/tools: update to a09a2fb
+ 2025-09-05 f5b20689e9 cmd/compile: optimize loads from readonly globals into constants on loong64
+ 2025-09-05 3492e4262b cmd/compile: simplify specific addition operations using the ADDV16 instruction
+ 2025-09-05 459b85ccaa cmd/fix: remove all functionality except for buildtag
+ 2025-09-05 87e72769fa runtime: simplify openbsd check in usesLibcall and mStackIsSystemAllocated
+ 2025-09-05 bb48272e24 cmd/compile: simplify zerorange on mips64
+ 2025-09-05 d52a56cce1 cmd/link/internal/ld: unconditionally use posix_fallocate on FreeBSD
+ 2025-09-04 9d0829963c net/http: fix cookie value of "" being interpreted as empty string.
+ 2025-09-04 ddce0522be cmd/internal/obj/loong64: add ADDU16I.D instruction support
+ 2025-09-04 00b8474e47 cmd/trace: don't filter events for profile by whether they have stack
+ 2025-09-04 e36c5aead6 log/slog: add multiple handlers support for logger
+ 2025-09-04 150fae714e crypto/x509: don't force system roots load in SetFallbackRoots
+ 2025-09-04 4f7bbc62c7 runtime, cmd/compile, cmd/internal/obj: remove duff support for loong64
+ 2025-09-04 b8cc907425 cmd/internal/obj/loong64: fix the usage of offset in the instructions [X]VLDREPL.{B/H/W/D}
+ 2025-09-04 8c27a80890 path{,/filepath}: speed up Match
+ 2025-09-04 b7c20413c5 runtime: remove obsolete osArchInit function
+ 2025-09-04 df29038486 cmd/compile/internal/ssa: load constant values from abi.PtrType.Elem
+ 2025-09-04 4373754bc9 cmd/compile: add store to load forwarding rules on riscv64
+ 2025-09-03 80038586ed cmd/compile: export to DWARF types only referenced through interfaces
+ 2025-09-03 91e76a513b cmd/compile: use generated loops instead of DUFFCOPY on loong64
+ 2025-09-03 c552ad913f cmd/compile: simplify memory load and store operations on loong64
+ 2025-09-03 e8f9127d1f net/netip: export Prefix.Compare, fix ordering
+ 2025-09-03 731e546166 cmd/compile: simplify the support for 32bit high multiply on loong64

Change-Id: I2c124fb8071e2972d39804867cafb6806e601aba
Commit: 9a349f8e72
Author: Cherry Mui
Date: 2025-09-11 15:09:34 -04:00
204 changed files with 7284 additions and 5911 deletions

api/next/61642.txt (new file)

@@ -0,0 +1 @@
pkg net/netip, method (Prefix) Compare(Prefix) int #61642

api/next/63963.txt (new file)

@@ -0,0 +1,3 @@
pkg crypto/ecdsa, type PrivateKey struct, D //deprecated #63963
pkg crypto/ecdsa, type PublicKey struct, X //deprecated #63963
pkg crypto/ecdsa, type PublicKey struct, Y //deprecated #63963

api/next/65954.txt (new file)

@@ -0,0 +1,6 @@
pkg log/slog, func NewMultiHandler(...Handler) *MultiHandler #65954
pkg log/slog, method (*MultiHandler) Enabled(context.Context, Level) bool #65954
pkg log/slog, method (*MultiHandler) Handle(context.Context, Record) error #65954
pkg log/slog, method (*MultiHandler) WithAttrs([]Attr) Handler #65954
pkg log/slog, method (*MultiHandler) WithGroup(string) Handler #65954
pkg log/slog, type MultiHandler struct #65954


@@ -0,0 +1 @@
The `big.Int` fields of [PublicKey] and [PrivateKey] are now deprecated.
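
For code moving off the deprecated fields, a minimal sketch (it assumes the byte-encoding accessors added under the same proposal #63963, such as `PrivateKey.Bytes`; the note above only covers the deprecation itself):

	package main

	import (
		"crypto/ecdsa"
		"crypto/elliptic"
		"crypto/rand"
		"fmt"
	)

	func main() {
		key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
		if err != nil {
			panic(err)
		}
		_ = key.D // deprecated: direct access to the big.Int scalar
		// Preferred: a fixed-length byte encoding of the private key
		// (assumed API from #63963).
		raw, err := key.Bytes()
		if err != nil {
			panic(err)
		}
		fmt.Printf("%d-byte private key encoding\n", len(raw))
	}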


@@ -0,0 +1,5 @@
If [PrivateKey] fields are modified after calling [PrivateKey.Precompute],
[PrivateKey.Validate] now fails.
[PrivateKey.D] is now checked for consistency with precomputed values, even if
it is not used.
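
The call order is what matters here; a minimal sketch of the sequence that `Validate` now rejects (the mutation is deliberately nonsensical):

	package main

	import (
		"crypto/rand"
		"crypto/rsa"
		"fmt"
		"math/big"
	)

	func main() {
		key, err := rsa.GenerateKey(rand.Reader, 2048)
		if err != nil {
			panic(err)
		}
		key.Precompute()
		// Changing D after Precompute leaves it inconsistent with the
		// precomputed CRT values (Precomputed.Dp, Precomputed.Dq).
		key.D = new(big.Int).Add(key.D, big.NewInt(2))
		// Validate now detects the mismatch instead of passing.
		if err := key.Validate(); err != nil {
			fmt.Println("validate failed as expected:", err)
		}
	}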


@@ -0,0 +1,6 @@
The [`NewMultiHandler`](/pkg/log/slog#NewMultiHandler) function creates a
[`MultiHandler`](/pkg/log/slog#MultiHandler) that invokes all the given Handlers.
Its `Enabled` method reports whether any of the handlers' `Enabled` methods
return true.
Its `Handle`, `WithAttrs`, and `WithGroup` methods call the corresponding method
on each of the enabled handlers.
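
Based on the API in api/next/65954.txt above, a minimal usage sketch that fans one logger out to a text handler on stderr and a JSON handler on stdout:

	package main

	import (
		"log/slog"
		"os"
	)

	func main() {
		h := slog.NewMultiHandler(
			slog.NewTextHandler(os.Stderr, nil),
			slog.NewJSONHandler(os.Stdout, nil),
		)
		logger := slog.New(h)
		// Each record is delivered to every enabled handler.
		logger.Info("server started", "port", 8080)
	}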


@@ -0,0 +1 @@
The new [Prefix.Compare] method compares two prefixes.
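
Because `Compare` returns -1, 0, or +1, it plugs directly into `slices.SortFunc`; a minimal sketch:

	package main

	import (
		"fmt"
		"net/netip"
		"slices"
	)

	func main() {
		prefixes := []netip.Prefix{
			netip.MustParsePrefix("192.168.1.0/24"),
			netip.MustParsePrefix("10.0.0.0/8"),
			netip.MustParsePrefix("10.0.0.0/16"),
		}
		// Prefix.Compare used as a method expression has the
		// func(a, b netip.Prefix) int shape SortFunc expects.
		slices.SortFunc(prefixes, netip.Prefix.Compare)
		fmt.Println(prefixes)
	}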


@@ -213,15 +213,17 @@ func parsePAXTime(s string) (time.Time, error) {
 	}
 	// Parse the nanoseconds.
-	if strings.Trim(sn, "0123456789") != "" {
-		return time.Time{}, ErrHeader
+	// Initialize an array with '0's to handle right padding automatically.
+	nanoDigits := [maxNanoSecondDigits]byte{'0', '0', '0', '0', '0', '0', '0', '0', '0'}
+	for i := range len(sn) {
+		switch c := sn[i]; {
+		case c < '0' || c > '9':
+			return time.Time{}, ErrHeader
+		case i < len(nanoDigits):
+			nanoDigits[i] = c
+		}
 	}
-	if len(sn) < maxNanoSecondDigits {
-		sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
-	} else {
-		sn = sn[:maxNanoSecondDigits] // Right truncate
-	}
-	nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
+	nsecs, _ := strconv.ParseInt(string(nanoDigits[:]), 10, 64) // Must succeed after validation
 	if len(ss) > 0 && ss[0] == '-' {
 		return time.Unix(secs, -1*nsecs), nil // Negative correction
 	}
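
The new code treats the sub-second field as a fixed-width decimal: an array pre-filled with '0' bytes absorbs both right-padding of short inputs and truncation of over-long ones, while the same loop validates every character. A standalone sketch of the idea (parseFraction and maxDigits are illustrative names, not from the tree):

	package main

	import (
		"errors"
		"fmt"
		"strconv"
	)

	const maxDigits = 9 // nanosecond precision

	// parseFraction converts the digits after a decimal point into
	// nanoseconds, right-padding with zeros and truncating extra
	// precision in a single validated pass.
	func parseFraction(sn string) (int64, error) {
		digits := [maxDigits]byte{'0', '0', '0', '0', '0', '0', '0', '0', '0'}
		for i := 0; i < len(sn); i++ {
			switch c := sn[i]; {
			case c < '0' || c > '9':
				return 0, errors.New("invalid fraction")
			case i < len(digits):
				digits[i] = c
			}
		}
		return strconv.ParseInt(string(digits[:]), 10, 64)
	}

	func main() {
		ns, _ := parseFraction("123")           // padded: 123000000
		ns2, _ := parseFraction("123456789123") // truncated: 123456789
		fmt.Println(ns, ns2)
	}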


@@ -439,3 +439,66 @@ func TestFormatPAXRecord(t *testing.T) {
 		}
 	}
 }
+
+func BenchmarkParsePAXTime(b *testing.B) {
+	tests := []struct {
+		name string
+		in   string
+		want time.Time
+		ok   bool
+	}{
+		{
+			name: "NoNanos",
+			in:   "123456",
+			want: time.Unix(123456, 0),
+			ok:   true,
+		},
+		{
+			name: "ExactNanos",
+			in:   "1.123456789",
+			want: time.Unix(1, 123456789),
+			ok:   true,
+		},
+		{
+			name: "WithNanoPadding",
+			in:   "1.123",
+			want: time.Unix(1, 123000000),
+			ok:   true,
+		},
+		{
+			name: "WithNanoTruncate",
+			in:   "1.123456789123",
+			want: time.Unix(1, 123456789),
+			ok:   true,
+		},
+		{
+			name: "TrailingError",
+			in:   "1.123abc",
+			want: time.Time{},
+			ok:   false,
+		},
+		{
+			name: "LeadingError",
+			in:   "1.abc123",
+			want: time.Time{},
+			ok:   false,
+		},
+	}
+	for _, tt := range tests {
+		b.Run(tt.name, func(b *testing.B) {
+			b.ReportAllocs()
+			for b.Loop() {
+				ts, err := parsePAXTime(tt.in)
+				if (err == nil) != tt.ok {
+					if err != nil {
+						b.Fatal(err)
+					}
+					b.Fatal("expected error")
+				}
+				if !ts.Equal(tt.want) {
+					b.Fatalf("time mismatch: got %v, want %v", ts, tt.want)
+				}
+			}
+		})
+	}
+}


@@ -282,6 +282,13 @@ lable2:
 	MOVVP	4(R5), R4       // a4040026
 	MOVVP	(R5), R4        // a4000026
 
+	// ADDU16I.D instruction
+	ADDV16	$(-32768<<16), R4, R5   // ADDV16 $-2147483648, R4, R5 // 85000012
+	ADDV16	$(0<<16), R4, R5        // ADDV16 $0, R4, R5 // 85000010
+	ADDV16	$(8<<16), R4, R5        // ADDV16 $524288, R4, R5 // 85200010
+	ADDV16	$(32767<<16), R4, R5    // ADDV16 $2147418112, R4, R5 // 85fcff11
+	ADDV16	$(16<<16), R4           // ADDV16 $1048576, R4 // 84400010
 	// Loong64 atomic memory access instructions
 	AMSWAPB	R14, (R13), R12 // ac395c38
 	AMSWAPH	R14, (R13), R12 // acb95c38
@@ -538,13 +545,29 @@ lable2:
 	// Load data from memory and broadcast to each element of a vector register: VMOVQ offset(Rj), <Vd>.<T>
 	VMOVQ	(R4), V0.B16    // 80008030
-	VMOVQ	1(R4), V1.H8    // 81044030
-	VMOVQ	2(R4), V2.W4    // 82082030
-	VMOVQ	3(R4), V3.V2    // 830c1030
+	VMOVQ	1(R4), V0.B16   // 80048030
+	VMOVQ	-3(R4), V0.B16  // 80f4bf30
+	VMOVQ	(R4), V1.H8     // 81004030
+	VMOVQ	2(R4), V1.H8    // 81044030
+	VMOVQ	-6(R4), V1.H8   // 81f45f30
+	VMOVQ	(R4), V2.W4     // 82002030
+	VMOVQ	8(R4), V2.W4    // 82082030
+	VMOVQ	-12(R4), V2.W4  // 82f42f30
+	VMOVQ	(R4), V3.V2     // 83001030
+	VMOVQ	24(R4), V3.V2   // 830c1030
+	VMOVQ	-16(R4), V3.V2  // 83f81730
 	XVMOVQ	(R4), X0.B32    // 80008032
-	XVMOVQ	1(R4), X1.H16   // 81044032
-	XVMOVQ	2(R4), X2.W8    // 82082032
-	XVMOVQ	3(R4), X3.V4    // 830c1032
+	XVMOVQ	1(R4), X0.B32   // 80048032
+	XVMOVQ	-5(R4), X0.B32  // 80ecbf32
+	XVMOVQ	(R4), X1.H16    // 81004032
+	XVMOVQ	2(R4), X1.H16   // 81044032
+	XVMOVQ	-10(R4), X1.H16 // 81ec5f32
+	XVMOVQ	(R4), X2.W8     // 82002032
+	XVMOVQ	8(R4), X2.W8    // 82082032
+	XVMOVQ	-20(R4), X2.W8  // 82ec2f32
+	XVMOVQ	(R4), X3.V4     // 83001032
+	XVMOVQ	24(R4), X3.V4   // 830c1032
+	XVMOVQ	-24(R4), X3.V4  // 83f41732
 	// VSEQ{B,H,W,V}, XVSEQ{B,H,W,V} instruction
 	VSEQB	V1, V2, V3      // 43040070


@@ -5,3 +5,5 @@
 TEXT errors(SB),$0
 	VSHUF4IV	$16, V1, V2     // ERROR "operand out of range 0 to 15"
 	XVSHUF4IV	$16, X1, X2     // ERROR "operand out of range 0 to 15"
+	ADDV16		$1, R4, R5      // ERROR "the constant must be a multiple of 65536."
+	ADDV16		$65535, R4, R5  // ERROR "the constant must be a multiple of 65536."


@@ -183,18 +183,16 @@ func splitQuoted(s string) (r []string, err error) {
 	return args, err
 }
 
-// Translate rewrites f.AST, the original Go input, to remove
-// references to the imported package C, replacing them with
-// references to the equivalent Go types, functions, and variables.
-func (p *Package) Translate(f *File) {
+// loadDebug runs gcc to load debug information for the File. The debug
+// information will be saved to the debugs field of the file, and be
+// processed when Translate is called on the file later.
+// loadDebug is called concurrently with different files.
+func (f *File) loadDebug(p *Package) {
 	for _, cref := range f.Ref {
 		// Convert C.ulong to C.unsigned long, etc.
 		cref.Name.C = cname(cref.Name.Go)
 	}
-	var conv typeConv
-	conv.Init(p.PtrSize, p.IntSize)
 
 	ft := fileTypedefs{typedefs: make(map[string]bool)}
 	numTypedefs := -1
 	for len(ft.typedefs) > numTypedefs {
@@ -213,8 +211,7 @@ func (p *Package) Translate(f *File) {
 		}
 		needType := p.guessKinds(f)
 		if len(needType) > 0 {
-			d := p.loadDWARF(f, &ft, needType)
-			p.recordTypes(f, d, &conv)
+			f.debugs = append(f.debugs, p.loadDWARF(f, &ft, needType))
 		}
 
 		// In godefs mode we're OK with the typedefs, which
@@ -224,6 +221,18 @@ func (p *Package) Translate(f *File) {
 			break
 		}
 	}
+}
+
+// Translate rewrites f.AST, the original Go input, to remove
+// references to the imported package C, replacing them with
+// references to the equivalent Go types, functions, and variables.
+// Preconditions: File.loadDebug must be called prior to translate.
+func (p *Package) Translate(f *File) {
+	var conv typeConv
+	conv.Init(p.PtrSize, p.IntSize)
+	for _, d := range f.debugs {
+		p.recordTypes(f, d, &conv)
+	}
 	p.prepareNames(f)
 	if p.rewriteCalls(f) {
 		// Add `import _cgo_unsafe "unsafe"` after the package statement.
@@ -280,6 +289,7 @@ func (f *File) loadDefines(gccOptions []string) bool {
 // guessKinds tricks gcc into revealing the kind of each
 // name xxx for the references C.xxx in the Go input.
 // The kind is either a constant, type, or variable.
+// guessKinds is called concurrently with different files.
 func (p *Package) guessKinds(f *File) []*Name {
 	// Determine kinds for names we already know about,
 	// like #defines or 'struct foo', before bothering with gcc.
@@ -523,6 +533,7 @@ func (p *Package) guessKinds(f *File) []*Name {
 // loadDWARF parses the DWARF debug information generated
 // by gcc to learn the details of the constants, variables, and types
 // being referred to as C.xxx.
+// loadDwarf is called concurrently with different files.
 func (p *Package) loadDWARF(f *File, ft *fileTypedefs, names []*Name) *debug {
 	// Extract the types from the DWARF section of an object
 	// from a well-formed C program. Gcc only generates DWARF info
@@ -1786,6 +1797,7 @@ func gccTmp() string {
 
 // gccCmd returns the gcc command line to use for compiling
 // the input.
+// gccCommand is called concurrently for different files.
 func (p *Package) gccCmd(ofile string) []string {
 	c := append(gccBaseCmd,
 		"-w", // no warnings
@@ -1829,6 +1841,7 @@ func (p *Package) gccCmd(ofile string) []string {
 
 // gccDebug runs gcc -gdwarf-2 over the C program stdin and
 // returns the corresponding DWARF data and, if present, debug data block.
+// gccDebug is called concurrently with different C programs.
 func (p *Package) gccDebug(stdin []byte, nnames int) (d *dwarf.Data, ints []int64, floats []float64, strs []string) {
 	ofile := gccTmp()
 	runGcc(stdin, p.gccCmd(ofile))
@@ -2219,6 +2232,7 @@ func gccDefines(stdin []byte, gccOptions []string) string {
 // gccErrors runs gcc over the C program stdin and returns
 // the errors that gcc prints. That is, this function expects
 // gcc to fail.
+// gccErrors is called concurrently with different C programs.
 func (p *Package) gccErrors(stdin []byte, extraArgs ...string) string {
 	// TODO(rsc): require failure
 	args := p.gccCmd(gccTmp())


@@ -30,6 +30,7 @@ import (
 	"cmd/internal/edit"
 	"cmd/internal/hash"
 	"cmd/internal/objabi"
+	"cmd/internal/par"
 	"cmd/internal/telemetry/counter"
 )
@@ -74,6 +75,8 @@ type File struct {
 	NoCallbacks map[string]bool // C function names that with #cgo nocallback directive
 	NoEscapes   map[string]bool // C function names that with #cgo noescape directive
 	Edit        *edit.Buffer
+
+	debugs []*debug // debug data from iterations of gccDebug. Initialized by File.loadDebug.
 }
 
 func (f *File) offset(p token.Pos) int {
@@ -391,7 +394,7 @@ func main() {
 	h := hash.New32()
 	io.WriteString(h, *importPath)
 	var once sync.Once
-	var wg sync.WaitGroup
+	q := par.NewQueue(runtime.GOMAXPROCS(0))
 	fs := make([]*File, len(goFiles))
 	for i, input := range goFiles {
 		if *srcDir != "" {
@@ -413,9 +416,7 @@ func main() {
 			fatalf("%s", err)
 		}
 
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
+		q.Add(func() {
 			// Apply trimpath to the file path. The path won't be read from after this point.
 			input, _ = objabi.ApplyRewrites(input, *trimpath)
 			if strings.ContainsAny(input, "\r\n") {
@@ -436,10 +437,12 @@ func main() {
 		})
 
 		fs[i] = f
-	}()
+
+		f.loadDebug(p)
+	})
 	}
 
-	wg.Wait()
+	<-q.Idle()
 
 	cPrefix = fmt.Sprintf("_%x", h.Sum(nil)[0:6])


@@ -1370,7 +1370,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		}
 	case ssa.OpAMD64LoweredPanicBoundsCR:
 		yIsReg = true
-		yVal := int(v.Args[0].Reg() - x86.REG_AX)
+		yVal = int(v.Args[0].Reg() - x86.REG_AX)
 		c := v.Aux.(ssa.PanicBoundsC).C
 		if c >= 0 && c <= abi.BoundsMaxConst {
 			xVal = int(c)


@@ -777,7 +777,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		}
 	case ssa.OpARMLoweredPanicBoundsCR:
 		yIsReg = true
-		yVal := int(v.Args[0].Reg() - arm.REG_R0)
+		yVal = int(v.Args[0].Reg() - arm.REG_R0)
 		c := v.Aux.(ssa.PanicBoundsC).C
 		if c >= 0 && c <= abi.BoundsMaxConst {
 			xVal = int(c)


@@ -1050,6 +1050,27 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.From.Offset = int64(condCode)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
+	case ssa.OpARM64CCMP,
+		ssa.OpARM64CCMN,
+		ssa.OpARM64CCMPconst,
+		ssa.OpARM64CCMNconst,
+		ssa.OpARM64CCMPW,
+		ssa.OpARM64CCMNW,
+		ssa.OpARM64CCMPWconst,
+		ssa.OpARM64CCMNWconst:
+		p := s.Prog(v.Op.Asm())
+		p.Reg = v.Args[0].Reg()
+		params := v.AuxArm64ConditionalParams()
+		p.From.Type = obj.TYPE_SPECIAL // assembler encodes conditional bits in Offset
+		p.From.Offset = int64(condBits[params.Cond()])
+		constValue, ok := params.ConstValue()
+		if ok {
+			p.AddRestSourceConst(constValue)
+		} else {
+			p.AddRestSourceReg(v.Args[1].Reg())
+		}
+		p.To.Type = obj.TYPE_CONST
+		p.To.Offset = params.Nzcv()
 	case ssa.OpARM64LoweredZero:
 		ptrReg := v.Args[0].Reg()
 		n := v.AuxInt
@@ -1319,7 +1340,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		}
 	case ssa.OpARM64LoweredPanicBoundsCR:
 		yIsReg = true
-		yVal := int(v.Args[0].Reg() - arm64.REG_R0)
+		yVal = int(v.Args[0].Reg() - arm64.REG_R0)
 		c := v.Aux.(ssa.PanicBoundsC).C
 		if c >= 0 && c <= abi.BoundsMaxConst {
 			xVal = int(c)


@@ -128,14 +128,29 @@ func Info(ctxt *obj.Link, fnsym *obj.LSym, infosym *obj.LSym, curfn obj.Func) (s
 	// already referenced by a dwarf var, attach an R_USETYPE relocation to
 	// the function symbol to insure that the type included in DWARF
 	// processing during linking.
+	// Do the same with R_USEIFACE relocations from the function symbol for the
+	// same reason.
+	// All these R_USETYPE relocations are only looked at if the function
+	// survives deadcode elimination in the linker.
 	typesyms := []*obj.LSym{}
 	for t := range fnsym.Func().Autot {
 		typesyms = append(typesyms, t)
 	}
+	for i := range fnsym.R {
+		if fnsym.R[i].Type == objabi.R_USEIFACE && !strings.HasPrefix(fnsym.R[i].Sym.Name, "go:itab.") {
+			// Types referenced through itab will be referenced from somewhere else
+			typesyms = append(typesyms, fnsym.R[i].Sym)
+		}
+	}
 	slices.SortFunc(typesyms, func(a, b *obj.LSym) int {
 		return strings.Compare(a.Name, b.Name)
 	})
+	var lastsym *obj.LSym
 	for _, sym := range typesyms {
+		if sym == lastsym {
+			continue
+		}
+		lastsym = sym
 		infosym.AddRel(ctxt, obj.Reloc{Type: objabi.R_USETYPE, Sym: sym})
 	}
 	fnsym.Func().Autot = nil


@@ -0,0 +1,105 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package dwarfgen

import (
	"debug/dwarf"
	"internal/platform"
	"internal/testenv"
	"io"
	"runtime"
	"testing"
)

func TestIssue75249(t *testing.T) {
	testenv.MustHaveGoRun(t)
	t.Parallel()

	if !platform.ExecutableHasDWARF(runtime.GOOS, runtime.GOARCH) {
		t.Skipf("skipping on %s/%s: no DWARF symbol table in executables", runtime.GOOS, runtime.GOARCH)
	}

	code := `
package main

type Data struct {
	Field1 int
	Field2 *int
	Field3 int
	Field4 *int
	Field5 int
	Field6 *int
	Field7 int
	Field8 *int
}
//go:noinline
func InitializeData(d *Data) {
	d.Field1++ // line 16
	d.Field2 = d.Field4
	d.Field3++
	d.Field4 = d.Field6
	d.Field5++
	d.Field6 = d.Field8
	d.Field7++
	d.Field8 = d.Field2 // line 23
}

func main() {
	var data Data
	InitializeData(&data)
}
`
	_, f := gobuild(t, t.TempDir(), true, []testline{{line: code}})
	defer f.Close()

	dwarfData, err := f.DWARF()
	if err != nil {
		t.Fatal(err)
	}
	dwarfReader := dwarfData.Reader()
	for {
		entry, err := dwarfReader.Next()
		if err != nil {
			t.Fatal(err)
		}
		if entry == nil {
			break
		}
		if entry.Tag != dwarf.TagCompileUnit {
			continue
		}
		name := entry.AttrField(dwarf.AttrName)
		if name == nil || name.Class != dwarf.ClassString || name.Val != "main" {
			continue
		}
		lr, err := dwarfData.LineReader(entry)
		if err != nil {
			t.Fatal(err)
		}
		stmts := map[int]bool{}
		for {
			var le dwarf.LineEntry
			err := lr.Next(&le)
			if err == io.EOF {
				break
			}
			if err != nil {
				t.Fatal(err)
			}
			if !le.IsStmt {
				continue
			}
			stmts[le.Line] = true
		}
		for i := 16; i <= 23; i++ {
			if !stmts[i] {
				t.Errorf("missing statement at line %d", i)
			}
		}
	}
}


@@ -768,6 +768,17 @@ opSwitch:
 		if n.X.Op() == ir.OINDEX && isIndexingCoverageCounter(n.X) {
 			return false
 		}
+
+	case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
+		n := n.(*ir.SliceExpr)
+		// Ignore superfluous slicing.
+		if n.Low != nil && n.Low.Op() == ir.OLITERAL && ir.Int64Val(n.Low) == 0 {
+			v.budget++
+		}
+		if n.High != nil && n.High.Op() == ir.OLEN && n.High.(*ir.UnaryExpr).X == n.X {
+			v.budget += 2
+		}
 	}
 
 	v.budget--


@@ -185,7 +185,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		ssa.OpLOONG64MULD,
 		ssa.OpLOONG64DIVF,
 		ssa.OpLOONG64DIVD,
-		ssa.OpLOONG64MULV, ssa.OpLOONG64MULHV, ssa.OpLOONG64MULHVU,
+		ssa.OpLOONG64MULV, ssa.OpLOONG64MULHV, ssa.OpLOONG64MULHVU, ssa.OpLOONG64MULH, ssa.OpLOONG64MULHU,
 		ssa.OpLOONG64DIVV, ssa.OpLOONG64REMV, ssa.OpLOONG64DIVVU, ssa.OpLOONG64REMVU,
 		ssa.OpLOONG64FCOPYSGD:
 		p := s.Prog(v.Op.Asm())
@@ -276,6 +276,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpLOONG64ADDVconst,
+		ssa.OpLOONG64ADDV16const,
 		ssa.OpLOONG64SUBVconst,
 		ssa.OpLOONG64ANDconst,
 		ssa.OpLOONG64ORconst,
@@ -552,13 +553,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
-	case ssa.OpLOONG64DUFFZERO:
-		// runtime.duffzero expects start address in R20
-		p := s.Prog(obj.ADUFFZERO)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = ir.Syms.Duffzero
-		p.To.Offset = v.AuxInt
 	case ssa.OpLOONG64LoweredZero:
 		ptrReg := v.Args[0].Reg()
 		n := v.AuxInt
@@ -652,49 +646,120 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 			zero8(s, ptrReg, off+n-8)
 		}
-	case ssa.OpLOONG64DUFFCOPY:
-		p := s.Prog(obj.ADUFFCOPY)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = ir.Syms.Duffcopy
-		p.To.Offset = v.AuxInt
 	case ssa.OpLOONG64LoweredMove:
-		// MOVx (Rarg1), Rtmp
-		// MOVx Rtmp, (Rarg0)
-		// ADDV $sz, Rarg1
-		// ADDV $sz, Rarg0
-		// BGEU Rarg2, Rarg0, -4(PC)
-		mov, sz := largestMove(v.AuxInt)
-		p := s.Prog(mov)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = loong64.REGTMP
-		p2 := s.Prog(mov)
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = loong64.REGTMP
-		p2.To.Type = obj.TYPE_MEM
-		p2.To.Reg = v.Args[0].Reg()
-		p3 := s.Prog(loong64.AADDVU)
-		p3.From.Type = obj.TYPE_CONST
-		p3.From.Offset = sz
-		p3.To.Type = obj.TYPE_REG
-		p3.To.Reg = v.Args[1].Reg()
-		p4 := s.Prog(loong64.AADDVU)
-		p4.From.Type = obj.TYPE_CONST
-		p4.From.Offset = sz
-		p4.To.Type = obj.TYPE_REG
-		p4.To.Reg = v.Args[0].Reg()
-		p5 := s.Prog(loong64.ABGEU)
-		p5.From.Type = obj.TYPE_REG
-		p5.From.Reg = v.Args[2].Reg()
-		p5.Reg = v.Args[1].Reg()
-		p5.To.Type = obj.TYPE_BRANCH
-		p5.To.SetTarget(p)
+		dstReg := v.Args[0].Reg()
+		srcReg := v.Args[1].Reg()
+		if dstReg == srcReg {
+			break
+		}
+		tmpReg := int16(loong64.REG_R20)
+		n := v.AuxInt
+		if n < 16 {
+			v.Fatalf("Move too small %d", n)
+		}
+		var off int64
+		for n >= 8 {
+			// MOVV off(srcReg), tmpReg
+			// MOVV tmpReg, off(dstReg)
+			move8(s, srcReg, dstReg, tmpReg, off)
+			off += 8
+			n -= 8
+		}
+		if n != 0 {
+			// MOVV off+n-8(srcReg), tmpReg
+			// MOVV tmpReg, off+n-8(srcReg)
+			move8(s, srcReg, dstReg, tmpReg, off+n-8)
+		}
+	case ssa.OpLOONG64LoweredMoveLoop:
+		dstReg := v.Args[0].Reg()
+		srcReg := v.Args[1].Reg()
+		if dstReg == srcReg {
+			break
+		}
+		countReg := int16(loong64.REG_R20)
+		tmpReg := int16(loong64.REG_R21)
+		var off int64
+		n := v.AuxInt
+		loopSize := int64(64)
+		if n < 3*loopSize {
+			// - a loop count of 0 won't work.
+			// - a loop count of 1 is useless.
+			// - a loop count of 2 is a code size ~tie
+			//     4 instructions to implement the loop
+			//     8 instructions in the loop body
+			//   vs
+			//     16 instructions in the straightline code
+			//   Might as well use straightline code.
+			v.Fatalf("ZeroLoop size too small %d", n)
+		}
+		// Put iteration count in a register.
+		//   MOVV $n/loopSize, countReg
+		p := s.Prog(loong64.AMOVV)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = n / loopSize
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = countReg
+		cntInit := p
+		// Move loopSize bytes starting at srcReg to dstReg.
+		for range loopSize / 8 {
+			// MOVV off(srcReg), tmpReg
+			// MOVV tmpReg, off(dstReg)
+			move8(s, srcReg, dstReg, tmpReg, off)
+			off += 8
+		}
+		// Increment srcReg and destReg by loopSize.
+		//   ADDV $loopSize, srcReg
+		p = s.Prog(loong64.AADDV)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = loopSize
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = srcReg
+		//   ADDV $loopSize, dstReg
+		p = s.Prog(loong64.AADDV)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = loopSize
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = dstReg
+		// Decrement loop count.
+		//   SUBV $1, countReg
+		p = s.Prog(loong64.ASUBV)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = 1
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = countReg
+		// Jump to loop header if we're not done yet.
+		//   BNE countReg, loop header
+		p = s.Prog(loong64.ABNE)
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = countReg
+		p.To.Type = obj.TYPE_BRANCH
+		p.To.SetTarget(cntInit.Link)
+		// Multiples of the loop size are now done.
+		n %= loopSize
+		off = 0
+		// Copy any fractional portion.
+		for n >= 8 {
+			// MOVV off(srcReg), tmpReg
+			// MOVV tmpReg, off(dstReg)
+			move8(s, srcReg, dstReg, tmpReg, off)
+			off += 8
+			n -= 8
+		}
+		if n != 0 {
+			// MOVV off+n-8(srcReg), tmpReg
+			// MOVV tmpReg, off+n-8(srcReg)
+			move8(s, srcReg, dstReg, tmpReg, off+n-8)
+		}
 	case ssa.OpLOONG64CALLstatic, ssa.OpLOONG64CALLclosure, ssa.OpLOONG64CALLinter:
 		s.Call(v)
@@ -746,7 +811,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		}
 	case ssa.OpLOONG64LoweredPanicBoundsCR:
 		yIsReg = true
-		yVal := int(v.Args[0].Reg() - loong64.REG_R4)
+		yVal = int(v.Args[0].Reg() - loong64.REG_R4)
 		c := v.Aux.(ssa.PanicBoundsC).C
 		if c >= 0 && c <= abi.BoundsMaxConst {
 			xVal = int(c)
@@ -1225,6 +1290,24 @@ func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg in
 	return p
 }
+
+// move8 copies 8 bytes at src+off to dst+off.
+func move8(s *ssagen.State, src, dst, tmp int16, off int64) {
+	// MOVV off(src), tmp
+	ld := s.Prog(loong64.AMOVV)
+	ld.From.Type = obj.TYPE_MEM
+	ld.From.Reg = src
+	ld.From.Offset = off
+	ld.To.Type = obj.TYPE_REG
+	ld.To.Reg = tmp
+	// MOVV tmp, off(dst)
+	st := s.Prog(loong64.AMOVV)
+	st.From.Type = obj.TYPE_REG
+	st.From.Reg = tmp
+	st.To.Type = obj.TYPE_MEM
+	st.To.Reg = dst
+	st.To.Offset = off
+}
 
 // zero8 zeroes 8 bytes at reg+off.
 func zero8(s *ssagen.State, reg int16, off int64) {
 	// MOVV ZR, off(reg)


@@ -5,41 +5,21 @@
 package mips
 
 import (
-	"cmd/compile/internal/base"
 	"cmd/compile/internal/objw"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/mips"
 )
 
-// TODO(mips): implement DUFFZERO
 func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
-	if cnt == 0 {
-		return p
+	if cnt%int64(types.PtrSize) != 0 {
+		panic("zeroed region not aligned")
 	}
-	if cnt < int64(4*types.PtrSize) {
-		for i := int64(0); i < cnt; i += int64(types.PtrSize) {
-			p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, base.Ctxt.Arch.FixedFrameSize+off+i)
-		}
-	} else {
-		//fmt.Printf("zerorange frame:%v, lo: %v, hi:%v \n", frame ,lo, hi)
-		// ADD $(FIXED_FRAME+frame+lo-4), SP, r1
-		// ADD $cnt, r1, r2
-		// loop:
-		//	MOVW	R0, (Widthptr)r1
-		//	ADD	$Widthptr, r1
-		//	BNE	r1, r2, loop
-		p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, base.Ctxt.Arch.FixedFrameSize+off-4, obj.TYPE_REG, mips.REGRT1, 0)
-		p.Reg = mips.REGSP
-		p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
-		p.Reg = mips.REGRT1
-		p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize))
-		p1 := p
-		p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0)
-		p = pp.Append(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
-		p.Reg = mips.REGRT2
-		p.To.SetTarget(p1)
+	for cnt != 0 {
+		p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, off)
+		cnt -= int64(types.PtrSize)
+		off += int64(types.PtrSize)
 	}
 	return p


@@ -551,7 +551,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		}
 	case ssa.OpMIPSLoweredPanicBoundsCR:
 		yIsReg = true
-		yVal := int(v.Args[0].Reg() - mips.REG_R1)
+		yVal = int(v.Args[0].Reg() - mips.REG_R1)
 		c := v.Aux.(ssa.PanicBoundsC).C
 		if c >= 0 && c <= abi.BoundsMaxConst {
 			xVal = int(c)


@@ -5,7 +5,6 @@
 package mips64
 
 import (
-	"cmd/compile/internal/ir"
 	"cmd/compile/internal/objw"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
@@ -13,37 +12,14 @@ import (
 )
 
 func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
-	if cnt == 0 {
-		return p
+	if cnt%int64(types.PtrSize) != 0 {
+		panic("zeroed region not aligned")
 	}
-	if cnt < int64(4*types.PtrSize) {
-		for i := int64(0); i < cnt; i += int64(types.PtrSize) {
-			p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+off+i)
-		}
-	} else if cnt <= int64(128*types.PtrSize) {
-		p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
-		p.Reg = mips.REGSP
-		p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = ir.Syms.Duffzero
-		p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize))
-	} else {
-		// ADDV	$(8+frame+lo-8), SP, r1
-		// ADDV	$cnt, r1, r2
-		// loop:
-		//	MOVV	R0, (Widthptr)r1
-		//	ADDV	$Widthptr, r1
-		//	BNE	r1, r2, loop
-		p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
-		p.Reg = mips.REGSP
-		p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
-		p.Reg = mips.REGRT1
-		p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize))
-		p1 := p
-		p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0)
-		p = pp.Append(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
-		p.Reg = mips.REGRT2
-		p.To.SetTarget(p1)
+	for cnt != 0 {
+		p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, off)
+		cnt -= int64(types.PtrSize)
+		off += int64(types.PtrSize)
 	}
 	return p


@@ -115,7 +115,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 			p.To.Type = obj.TYPE_REG
 			p.To.Reg = y
 		}
-	case ssa.OpMIPS64MOVVnop:
+	case ssa.OpMIPS64MOVVnop, ssa.OpMIPS64ZERO:
 		// nothing to do
 	case ssa.OpLoadReg:
 		if v.Type.IsFlags() {
@@ -301,16 +301,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
 		ssagen.AddAux(&p.To, v)
-	case ssa.OpMIPS64MOVBstorezero,
-		ssa.OpMIPS64MOVHstorezero,
-		ssa.OpMIPS64MOVWstorezero,
-		ssa.OpMIPS64MOVVstorezero:
-		p := s.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = mips.REGZERO
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		ssagen.AddAux(&p.To, v)
 	case ssa.OpMIPS64MOVBreg,
 		ssa.OpMIPS64MOVBUreg,
 		ssa.OpMIPS64MOVHreg,
@@ -542,7 +532,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		}
 	case ssa.OpMIPS64LoweredPanicBoundsCR:
 		yIsReg = true
-		yVal := int(v.Args[0].Reg() - mips.REG_R1)
+		yVal = int(v.Args[0].Reg() - mips.REG_R1)
 		c := v.Aux.(ssa.PanicBoundsC).C
 		if c >= 0 && c <= abi.BoundsMaxConst {
 			xVal = int(c)


@@ -1947,7 +1947,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		}
 	case ssa.OpPPC64LoweredPanicBoundsCR:
 		yIsReg = true
-		yVal := int(v.Args[0].Reg() - ppc64.REG_R3)
+		yVal = int(v.Args[0].Reg() - ppc64.REG_R3)
 		c := v.Aux.(ssa.PanicBoundsC).C
 		if c >= 0 && c <= abi.BoundsMaxConst {
 			xVal = int(c)


@@ -414,6 +414,10 @@ var kinds = []abi.Kind{
 	types.TUNSAFEPTR: abi.UnsafePointer,
 }
 
+func ABIKindOfType(t *types.Type) abi.Kind {
+	return kinds[t.Kind()]
+}
+
 var (
 	memhashvarlen  *obj.LSym
 	memequalvarlen *obj.LSym
@@ -512,8 +516,7 @@ func dcommontype(c rttype.Cursor, t *types.Type) {
 	c.Field("Align_").WriteUint8(uint8(t.Alignment()))
 	c.Field("FieldAlign_").WriteUint8(uint8(t.Alignment()))
-	kind := kinds[t.Kind()]
-	c.Field("Kind_").WriteUint8(uint8(kind))
+	c.Field("Kind_").WriteUint8(uint8(ABIKindOfType(t)))
 	c.Field("Equal").WritePtr(eqfunc)
 	c.Field("GCData").WritePtr(gcsym)


@@ -6,7 +6,6 @@ package riscv64
 
 import (
 	"cmd/compile/internal/base"
-	"cmd/compile/internal/ir"
 	"cmd/compile/internal/objw"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
@@ -14,46 +13,19 @@ import (
 )
 
 func zeroRange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
-	if cnt == 0 {
-		return p
+	if cnt%int64(types.PtrSize) != 0 {
+		panic("zeroed region not aligned")
 	}
 
 	// Adjust the frame to account for LR.
 	off += base.Ctxt.Arch.FixedFrameSize
 
-	if cnt < int64(4*types.PtrSize) {
-		for i := int64(0); i < cnt; i += int64(types.PtrSize) {
-			p = pp.Append(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_SP, off+i)
-		}
-		return p
+	for cnt != 0 {
+		p = pp.Append(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_SP, off)
+		cnt -= int64(types.PtrSize)
+		off += int64(types.PtrSize)
 	}
 
-	if cnt <= int64(128*types.PtrSize) {
-		p = pp.Append(p, riscv.AADDI, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_X25, 0)
-		p.Reg = riscv.REG_SP
-		p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = ir.Syms.Duffzero
-		p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize))
-		return p
-	}
-
-	// Loop, zeroing pointer width bytes at a time.
-	// ADD	$(off), SP, T0
-	// ADD	$(cnt), T0, T1
-	// loop:
-	//	MOV	ZERO, (T0)
-	//	ADD	$Widthptr, T0
-	//	BNE	T0, T1, loop
-	p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_T0, 0)
-	p.Reg = riscv.REG_SP
-	p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, riscv.REG_T1, 0)
-	p.Reg = riscv.REG_T0
-	p = pp.Append(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_T0, 0)
-	loop := p
-	p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, riscv.REG_T0, 0)
-	p = pp.Append(p, riscv.ABNE, obj.TYPE_REG, riscv.REG_T0, 0, obj.TYPE_BRANCH, 0, 0)
-	p.Reg = riscv.REG_T1
-	p.To.SetTarget(loop)
-
 	return p
 }


@@ -181,6 +181,8 @@ func largestMove(alignment int64) (obj.As, int64) {
 	}
 }
 
+var fracMovOps = []obj.As{riscv.AMOVB, riscv.AMOVH, riscv.AMOVW, riscv.AMOV}
+
 // ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
 // RISC-V has no flags, so this is a no-op.
 func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {}
@@ -544,7 +546,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		}
 	case ssa.OpRISCV64LoweredPanicBoundsCR:
 		yIsReg = true
-		yVal := int(v.Args[0].Reg() - riscv.REG_X5)
+		yVal = int(v.Args[0].Reg() - riscv.REG_X5)
 		c := v.Aux.(ssa.PanicBoundsC).C
 		if c >= 0 && c <= abi.BoundsMaxConst {
 			xVal = int(c)
@@ -738,70 +740,181 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.RegTo2 = riscv.REG_ZERO
 
 	case ssa.OpRISCV64LoweredZero:
-		mov, sz := largestMove(v.AuxInt)
-
-		// mov	ZERO, (Rarg0)
-		// ADD	$sz, Rarg0
-		// BGEU	Rarg1, Rarg0, -2(PC)
-		p := s.Prog(mov)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = riscv.REG_ZERO
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-
-		p2 := s.Prog(riscv.AADD)
-		p2.From.Type = obj.TYPE_CONST
-		p2.From.Offset = sz
-		p2.To.Type = obj.TYPE_REG
-		p2.To.Reg = v.Args[0].Reg()
-
-		p3 := s.Prog(riscv.ABGEU)
-		p3.To.Type = obj.TYPE_BRANCH
-		p3.Reg = v.Args[0].Reg()
-		p3.From.Type = obj.TYPE_REG
-		p3.From.Reg = v.Args[1].Reg()
-		p3.To.SetTarget(p)
+		ptr := v.Args[0].Reg()
+		sc := v.AuxValAndOff()
+		n := sc.Val64()
+		mov, sz := largestMove(sc.Off64())
+
+		// mov	ZERO, (offset)(Rarg0)
+		var off int64
+		for n >= sz {
+			zeroOp(s, mov, ptr, off)
+			off += sz
+			n -= sz
+		}
+		for i := len(fracMovOps) - 1; i >= 0; i-- {
+			tsz := int64(1 << i)
+			if n < tsz {
+				continue
+			}
+			zeroOp(s, fracMovOps[i], ptr, off)
+			off += tsz
+			n -= tsz
+		}
+
+	case ssa.OpRISCV64LoweredZeroLoop:
+		ptr := v.Args[0].Reg()
+		sc := v.AuxValAndOff()
+		n := sc.Val64()
+		mov, sz := largestMove(sc.Off64())
+		chunk := 8 * sz
+		if n <= 3*chunk {
+			v.Fatalf("ZeroLoop too small:%d, expect:%d", n, 3*chunk)
+		}
+		tmp := v.RegTmp()
+
+		p := s.Prog(riscv.AADD)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = n - n%chunk
+		p.Reg = ptr
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = tmp
+
+		for i := int64(0); i < 8; i++ {
+			zeroOp(s, mov, ptr, sz*i)
+		}
+
+		p2 := s.Prog(riscv.AADD)
+		p2.From.Type = obj.TYPE_CONST
+		p2.From.Offset = chunk
+		p2.To.Type = obj.TYPE_REG
+		p2.To.Reg = ptr
+
+		p3 := s.Prog(riscv.ABNE)
+		p3.From.Reg = tmp
+		p3.From.Type = obj.TYPE_REG
+		p3.Reg = ptr
+		p3.To.Type = obj.TYPE_BRANCH
+		p3.To.SetTarget(p.Link)
+
+		n %= chunk
+		// mov	ZERO, (offset)(Rarg0)
+		var off int64
+		for n >= sz {
+			zeroOp(s, mov, ptr, off)
+			off += sz
+			n -= sz
+		}
+		for i := len(fracMovOps) - 1; i >= 0; i-- {
+			tsz := int64(1 << i)
+			if n < tsz {
+				continue
+			}
+			zeroOp(s, fracMovOps[i], ptr, off)
+			off += tsz
+			n -= tsz
+		}
 
 	case ssa.OpRISCV64LoweredMove:
-		mov, sz := largestMove(v.AuxInt)
-
-		// mov	(Rarg1), T2
-		// mov	T2, (Rarg0)
-		// ADD	$sz, Rarg0
-		// ADD	$sz, Rarg1
-		// BGEU	Rarg2, Rarg0, -4(PC)
-		p := s.Prog(mov)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = riscv.REG_T2
-
-		p2 := s.Prog(mov)
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = riscv.REG_T2
-		p2.To.Type = obj.TYPE_MEM
-		p2.To.Reg = v.Args[0].Reg()
-
-		p3 := s.Prog(riscv.AADD)
-		p3.From.Type = obj.TYPE_CONST
-		p3.From.Offset = sz
-		p3.To.Type = obj.TYPE_REG
-		p3.To.Reg = v.Args[0].Reg()
-
-		p4 := s.Prog(riscv.AADD)
-		p4.From.Type = obj.TYPE_CONST
-		p4.From.Offset = sz
-		p4.To.Type = obj.TYPE_REG
-		p4.To.Reg = v.Args[1].Reg()
-
-		p5 := s.Prog(riscv.ABGEU)
-		p5.To.Type = obj.TYPE_BRANCH
-		p5.Reg = v.Args[1].Reg()
-		p5.From.Type = obj.TYPE_REG
-		p5.From.Reg = v.Args[2].Reg()
-		p5.To.SetTarget(p)
+		dst := v.Args[0].Reg()
+		src := v.Args[1].Reg()
+		if dst == src {
+			break
+		}
+		sa := v.AuxValAndOff()
+		n := sa.Val64()
+		mov, sz := largestMove(sa.Off64())
+
+		var off int64
+		tmp := int16(riscv.REG_X5)
+		for n >= sz {
+			moveOp(s, mov, dst, src, tmp, off)
+			off += sz
+			n -= sz
+		}
+		for i := len(fracMovOps) - 1; i >= 0; i-- {
+			tsz := int64(1 << i)
+			if n < tsz {
+				continue
+			}
+			moveOp(s, fracMovOps[i], dst, src, tmp, off)
+			off += tsz
+			n -= tsz
+		}
+
+	case ssa.OpRISCV64LoweredMoveLoop:
+		dst := v.Args[0].Reg()
+		src := v.Args[1].Reg()
+		if dst == src {
+			break
+		}
+		sc := v.AuxValAndOff()
+		n := sc.Val64()
+		mov, sz := largestMove(sc.Off64())
+		chunk := 8 * sz
+		if n <= 3*chunk {
+			v.Fatalf("MoveLoop too small:%d, expect:%d", n, 3*chunk)
+		}
+		tmp := int16(riscv.REG_X5)
+
+		p := s.Prog(riscv.AADD)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = n - n%chunk
+		p.Reg = src
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = riscv.REG_X6
+
+		for i := int64(0); i < 8; i++ {
+			moveOp(s, mov, dst, src, tmp, sz*i)
+		}
+
+		p1 := s.Prog(riscv.AADD)
+		p1.From.Type = obj.TYPE_CONST
+		p1.From.Offset = chunk
+		p1.To.Type = obj.TYPE_REG
+		p1.To.Reg = src
+
+		p2 := s.Prog(riscv.AADD)
+		p2.From.Type = obj.TYPE_CONST
+		p2.From.Offset = chunk
+		p2.To.Type = obj.TYPE_REG
+		p2.To.Reg = dst
+
+		p3 := s.Prog(riscv.ABNE)
+		p3.From.Reg = riscv.REG_X6
+		p3.From.Type = obj.TYPE_REG
+		p3.Reg = src
+		p3.To.Type = obj.TYPE_BRANCH
+		p3.To.SetTarget(p.Link)
+
+		n %= chunk
+		var off int64
+		for n >= sz {
+			moveOp(s, mov, dst, src, tmp, off)
+			off += sz
+			n -= sz
+		}
+		for i := len(fracMovOps) - 1; i >= 0; i-- {
+			tsz := int64(1 << i)
+			if n < tsz {
+				continue
+			}
+			moveOp(s, fracMovOps[i], dst, src, tmp, off)
+			off += tsz
+			n -= tsz
+		}
 
 	case ssa.OpRISCV64LoweredNilCheck:
 		// Issue a load which will fault if arg is nil.
@@ -836,20 +949,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
-	case ssa.OpRISCV64DUFFZERO:
-		p := s.Prog(obj.ADUFFZERO)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = ir.Syms.Duffzero
-		p.To.Offset = v.AuxInt
-	case ssa.OpRISCV64DUFFCOPY:
-		p := s.Prog(obj.ADUFFCOPY)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = ir.Syms.Duffcopy
-		p.To.Offset = v.AuxInt
 	case ssa.OpRISCV64LoweredPubBarrier:
 		// FENCE
 		s.Prog(v.Op.Asm())
@@ -955,3 +1054,31 @@ func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg in
 	p.Pos = p.Pos.WithNotStmt()
 	return p
 }
+
+func zeroOp(s *ssagen.State, mov obj.As, reg int16, off int64) {
+	p := s.Prog(mov)
+	p.From.Type = obj.TYPE_REG
+	p.From.Reg = riscv.REG_ZERO
+	p.To.Type = obj.TYPE_MEM
+	p.To.Reg = reg
+	p.To.Offset = off
+	return
+}
+
+func moveOp(s *ssagen.State, mov obj.As, dst int16, src int16, tmp int16, off int64) {
+	p := s.Prog(mov)
+	p.From.Type = obj.TYPE_MEM
+	p.From.Reg = src
+	p.From.Offset = off
+	p.To.Type = obj.TYPE_REG
+	p.To.Reg = tmp
+
+	p1 := s.Prog(mov)
+	p1.From.Type = obj.TYPE_REG
+	p1.From.Reg = tmp
+	p1.To.Type = obj.TYPE_MEM
+	p1.To.Reg = dst
+	p1.To.Offset = off
+	return
+}


@@ -608,7 +608,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		}
 	case ssa.OpS390XLoweredPanicBoundsCR:
 		yIsReg = true
-		yVal := int(v.Args[0].Reg() - s390x.REG_R0)
+		yVal = int(v.Args[0].Reg() - s390x.REG_R0)
 		c := v.Aux.(ssa.PanicBoundsC).C
 		if c >= 0 && c <= abi.BoundsMaxConst {
 			xVal = int(c)


@@ -156,12 +156,14 @@ func init() {
 		gp11           = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
 		gp11sp         = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}}
 		gp1flags       = regInfo{inputs: []regMask{gpg}}
+		gp1flagsflags  = regInfo{inputs: []regMask{gpg}}
 		gp1flags1      = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
 		gp11flags      = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp, 0}}
 		gp21           = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}}
 		gp21nog        = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}}
 		gp21flags      = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp, 0}}
 		gp2flags       = regInfo{inputs: []regMask{gpg, gpg}}
+		gp2flagsflags  = regInfo{inputs: []regMask{gpg, gpg}}
 		gp2flags1      = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}}
 		gp2flags1flags = regInfo{inputs: []regMask{gp, gp, 0}, outputs: []regMask{gp, 0}}
 		gp2load        = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
@@ -508,6 +510,22 @@ func init() {
 		{name: "CSNEG", argLength: 3, reg: gp2flags1, asm: "CSNEG", aux: "CCop"}, // auxint(flags) ? arg0 : -arg1
 		{name: "CSETM", argLength: 1, reg: readflags, asm: "CSETM", aux: "CCop"}, // auxint(flags) ? -1 : 0
 
+		// conditional comparison instructions; auxint is
+		// combination of Cond, Nzcv and optional ConstValue
+		// Behavior:
+		//   If the condition 'Cond' evaluates to true against current flags,
+		//   flags are set to the result of the comparison operation.
+		//   Otherwise, flags are set to the fallback value 'Nzcv'.
+		{name: "CCMP", argLength: 3, reg: gp2flagsflags, asm: "CCMP", aux: "ARM64ConditionalParams", typ: "Flag"},        // If Cond then flags = CMP arg0 arg1 else flags = Nzcv
+		{name: "CCMN", argLength: 3, reg: gp2flagsflags, asm: "CCMN", aux: "ARM64ConditionalParams", typ: "Flag"},        // If Cond then flags = CMN arg0 arg1 else flags = Nzcv
+		{name: "CCMPconst", argLength: 2, reg: gp1flagsflags, asm: "CCMP", aux: "ARM64ConditionalParams", typ: "Flag"},   // If Cond then flags = CMPconst [ConstValue] arg0 else flags = Nzcv
+		{name: "CCMNconst", argLength: 2, reg: gp1flagsflags, asm: "CCMN", aux: "ARM64ConditionalParams", typ: "Flag"},   // If Cond then flags = CMNconst [ConstValue] arg0 else flags = Nzcv
+		{name: "CCMPW", argLength: 3, reg: gp2flagsflags, asm: "CCMPW", aux: "ARM64ConditionalParams", typ: "Flag"},      // If Cond then flags = CMPW arg0 arg1 else flags = Nzcv
+		{name: "CCMNW", argLength: 3, reg: gp2flagsflags, asm: "CCMNW", aux: "ARM64ConditionalParams", typ: "Flag"},      // If Cond then flags = CMNW arg0 arg1 else flags = Nzcv
+		{name: "CCMPWconst", argLength: 2, reg: gp1flagsflags, asm: "CCMPW", aux: "ARM64ConditionalParams", typ: "Flag"}, // If Cond then flags = CCMPWconst [ConstValue] arg0 else flags = Nzcv
+		{name: "CCMNWconst", argLength: 2, reg: gp1flagsflags, asm: "CCMNW", aux: "ARM64ConditionalParams", typ: "Flag"}, // If Cond then flags = CCMNWconst [ConstValue] arg0 else flags = Nzcv
+
 		// function calls
 		{name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true},                // call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
 		{name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true},  // tail call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem


@ -17,8 +17,8 @@
(Hmul64 ...) => (MULHV ...) (Hmul64 ...) => (MULHV ...)
(Hmul64u ...) => (MULHVU ...) (Hmul64u ...) => (MULHVU ...)
(Hmul32 x y) => (SRAVconst (MULV (SignExt32to64 x) (SignExt32to64 y)) [32]) (Hmul32 ...) => (MULH ...)
(Hmul32u x y) => (SRLVconst (MULV (ZeroExt32to64 x) (ZeroExt32to64 y)) [32]) (Hmul32u ...) => (MULHU ...)
(Div64 x y) => (DIVV x y) (Div64 x y) => (DIVV x y)
(Div64u ...) => (DIVVU ...) (Div64u ...) => (DIVVU ...)
@ -419,34 +419,8 @@
(MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore [8] dst (MOVVload [8] src mem)
(MOVVstore dst (MOVVload src mem) mem)) (MOVVstore dst (MOVVload src mem) mem))
// strip off fractional word move (Move [s] dst src mem) && s > 16 && s < 192 && logLargeCopy(v, s) => (LoweredMove [s] dst src mem)
(Move [s] dst src mem) && s%8 != 0 && s > 16 => (Move [s] dst src mem) && s >= 192 && logLargeCopy(v, s) => (LoweredMoveLoop [s] dst src mem)
(Move [s%8]
(OffPtr <dst.Type> dst [s-s%8])
(OffPtr <src.Type> src [s-s%8])
(Move [s-s%8] dst src mem))
// medium move uses a duff device
(Move [s] dst src mem)
&& s%8 == 0 && s > 16 && s <= 8*128
&& logLargeCopy(v, s) =>
(DUFFCOPY [16 * (128 - s/8)] dst src mem)
// 16 and 128 are magic constants. 16 is the number of bytes to encode:
// MOVV (R20), R30
// ADDV $8, R20
// MOVV R30, (R21)
// ADDV $8, R21
// and 128 is the number of such blocks. See runtime/duff_loong64.s:duffcopy.
// large move uses a loop
(Move [s] dst src mem)
&& s%8 == 0 && s > 1024 && logLargeCopy(v, s) =>
(LoweredMove
dst
src
(ADDVconst <src.Type> src [s-8])
mem)
// float <=> int register moves, with no conversion. // float <=> int register moves, with no conversion.
// These come up when compiling math.{Float64bits, Float64frombits, Float32bits, Float32frombits}. // These come up when compiling math.{Float64bits, Float64frombits, Float32bits, Float32frombits}.
@@ -455,6 +429,10 @@
(MOVWUload [off] {sym} ptr (MOVFstore [off] {sym} ptr val _)) => (ZeroExt32to64 (MOVWfpgp <typ.Float32> val))
(MOVFload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) => (MOVWgpfp val)
+// If the memory load and store operations use the same ptr, they are combined into a direct move operation between registers.
+(MOV(V|W|H|B)load [off] {sym} ptr (MOV(V|W|H|B)store [off] {sym} ptr x _)) => (MOV(V|W|H|B)reg x)
+(MOV(W|H|B)Uload [off] {sym} ptr (MOV(W|H|B)store [off] {sym} ptr x _)) => (MOV(W|H|B)Ureg x)
// Similarly for stores, if we see a store after FPR <=> GPR move, then redirect store to use the other register set.
(MOVVstore [off] {sym} ptr (MOVVfpgp val) mem) => (MOVDstore [off] {sym} ptr val mem)
(MOVDstore [off] {sym} ptr (MOVVgpfp val) mem) => (MOVVstore [off] {sym} ptr val mem)
@@ -752,6 +730,9 @@
(ADDV x0 x1:(SLLVconst [c] y)) && x1.Uses == 1 && c > 0 && c <= 4 => (ADDshiftLLV x0 y [c])
+// fold constant in ADDshift op
+(ADDshiftLLV x (MOVVconst [c]) [d]) && is12Bit(c<<d) => (ADDVconst x [c<<d])
// div by constant
(DIVVU x (MOVVconst [1])) => x
(DIVVU x (MOVVconst [c])) && isPowerOfTwo(c) => (SRLVconst [log64(c)] x)
@@ -816,6 +797,7 @@
(SUBVconst [c] (SUBVconst [d] x)) && is32Bit(-c-d) => (ADDVconst [-c-d] x)
(SUBVconst [c] (ADDVconst [d] x)) && is32Bit(-c+d) => (ADDVconst [-c+d] x)
(SUBV (MOVVconst [c]) (NEGV (SUBVconst [d] x))) => (ADDVconst [c-d] x)
+(ADDVconst [c] x) && is32Bit(c) && c&0xffff == 0 && c != 0 => (ADDV16const [c] x)
(SLLVconst [c] (MOVVconst [d])) => (MOVVconst [d<<uint64(c)])
(SRLVconst [c] (MOVVconst [d])) => (MOVVconst [int64(uint64(d)>>uint64(c))])
(SRAVconst [c] (MOVVconst [d])) => (MOVVconst [d>>uint64(c)])
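The new ADDV16const rule targets constants whose low 16 bits are all zero; loong64 can add such a value with a single shifted-immediate add, so no separate constant materialization is needed. A sketch of what the rule's guard encodes (helper name illustrative):

    import "math"

    // fitsAddV16 mirrors the guard: c is nonzero, a multiple of 65536,
    // and representable in 32 bits, so it fits a signed 16-bit immediate
    // shifted left by 16.
    func fitsAddV16(c int64) bool {
        return c != 0 && c&0xffff == 0 && c >= math.MinInt32 && c <= math.MaxInt32
    }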
@@ -970,3 +952,12 @@
&& isInlinableMemmove(dst, src, sz, config)
&& clobber(call)
=> (Move [sz] dst src mem)
+
+// fold readonly sym load
+(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read8(sym, int64(off)))])
+(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVWUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVVload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(int8(read8(sym, int64(off))))])
+(MOVHload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
+(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
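These rules constant-fold loads from read-only symbol data at compile time. A sketch of the little-endian case of the read16 helper they call (the real helper takes the target byte order and lives in the rewrite support code):

    // read16le reads two bytes of a symbol's read-only data and assembles
    // the constant the load would have produced at run time.
    func read16le(data []byte, off int64) uint16 {
        return uint16(data[off]) | uint16(data[off+1])<<8
    }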

View file

@@ -189,14 +189,17 @@ func init() {
{name: "VPCNT16", argLength: 1, reg: fp11, asm: "VPCNTH"}, // count set bits for each 16-bit unit and store the result in each 16-bit unit

// binary ops
{name: "ADDV", argLength: 2, reg: gp21, asm: "ADDVU", commutative: true}, // arg0 + arg1
{name: "ADDVconst", argLength: 1, reg: gp11sp, asm: "ADDVU", aux: "Int64"}, // arg0 + auxInt. auxInt is 32-bit, also in other *const ops.
+{name: "ADDV16const", argLength: 1, reg: gp11sp, asm: "ADDV16", aux: "Int64"}, // arg0 + auxInt. auxInt is signed 32-bit and is a multiple of 65536, also in other *const ops.
{name: "SUBV", argLength: 2, reg: gp21, asm: "SUBVU"}, // arg0 - arg1
{name: "SUBVconst", argLength: 1, reg: gp11, asm: "SUBVU", aux: "Int64"}, // arg0 - auxInt
{name: "MULV", argLength: 2, reg: gp21, asm: "MULV", commutative: true, typ: "Int64"}, // arg0 * arg1
{name: "MULHV", argLength: 2, reg: gp21, asm: "MULHV", commutative: true, typ: "Int64"}, // (arg0 * arg1) >> 64, signed
{name: "MULHVU", argLength: 2, reg: gp21, asm: "MULHVU", commutative: true, typ: "UInt64"}, // (arg0 * arg1) >> 64, unsigned
+{name: "MULH", argLength: 2, reg: gp21, asm: "MULH", commutative: true, typ: "Int32"}, // (arg0 * arg1) >> 32, signed
+{name: "MULHU", argLength: 2, reg: gp21, asm: "MULHU", commutative: true, typ: "UInt32"}, // (arg0 * arg1) >> 32, unsigned
{name: "DIVV", argLength: 2, reg: gp21, asm: "DIVV", typ: "Int64"}, // arg0 / arg1, signed
{name: "DIVVU", argLength: 2, reg: gp21, asm: "DIVVU", typ: "UInt64"}, // arg0 / arg1, unsigned
{name: "REMV", argLength: 2, reg: gp21, asm: "REMV", typ: "Int64"}, // arg0 / arg1, signed
@@ -358,24 +361,6 @@ func init() {
{name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{gpsp, buildReg("R29"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, last arg=mem, auxint=argsize, returns mem

-// duffzero
-// arg0 = address of memory to zero
-// arg1 = mem
-// auxint = offset into duffzero code to start executing
-// returns mem
-// R20 aka loong64.REGRT1 changed as side effect
-{
-name: "DUFFZERO",
-aux: "Int64",
-argLength: 2,
-reg: regInfo{
-inputs: []regMask{buildReg("R20")},
-clobbers: buildReg("R20 R1"),
-},
-typ: "Mem",
-faultOnNilArg0: true,
-},

// medium zeroing
// arg0 = address of memory to zero
// arg1 = mem
@@ -391,25 +376,6 @@ func init() {
faultOnNilArg0: true,
},

-// duffcopy
-// arg0 = address of dst memory (in R21, changed as side effect)
-// arg1 = address of src memory (in R20, changed as side effect)
-// arg2 = mem
-// auxint = offset into duffcopy code to start executing
-// returns mem
-{
-name: "DUFFCOPY",
-aux: "Int64",
-argLength: 3,
-reg: regInfo{
-inputs: []regMask{buildReg("R21"), buildReg("R20")},
-clobbers: buildReg("R20 R21 R1"),
-},
-typ: "Mem",
-faultOnNilArg0: true,
-faultOnNilArg1: true,
-},

// large zeroing
// arg0 = address of memory to zero
// arg1 = mem
@@ -427,27 +393,40 @@ func init() {
needIntTemp: true,
},

-// large or unaligned move
-// arg0 = address of dst memory (in R21, changed as side effect)
-// arg1 = address of src memory (in R20, changed as side effect)
-// arg2 = address of the last element of src
-// arg3 = mem
-// auxint = alignment
+// medium copying
+// arg0 = address of dst memory
+// arg1 = address of src memory
+// arg2 = mem
+// auxint = number of bytes to copy
// returns mem
-// MOVx (R20), Rtmp
-// MOVx Rtmp, (R21)
-// ADDV $sz, R20
-// ADDV $sz, R21
-// BGEU Rarg2, R20, -4(PC)
{
name: "LoweredMove",
aux: "Int64",
-argLength: 4,
+argLength: 3,
reg: regInfo{
-inputs: []regMask{buildReg("R21"), buildReg("R20"), gp},
-clobbers: buildReg("R20 R21"),
+inputs: []regMask{gp &^ buildReg("R20"), gp &^ buildReg("R20")},
+clobbers: buildReg("R20"),
},
-typ: "Mem",
faultOnNilArg0: true,
faultOnNilArg1: true,
},
+
+// large copying
+// arg0 = address of dst memory
+// arg1 = address of src memory
+// arg2 = mem
+// auxint = number of bytes to copy
+// returns mem
+{
+name: "LoweredMoveLoop",
+aux: "Int64",
+argLength: 3,
+reg: regInfo{
+inputs: []regMask{gp &^ buildReg("R20 R21"), gp &^ buildReg("R20 R21")},
+clobbers: buildReg("R20 R21"),
+clobbersArg0: true,
+clobbersArg1: true,
+},
+faultOnNilArg0: true,
+faultOnNilArg1: true,
+},
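The clobbersArg0/clobbersArg1 flags tell the register allocator that the op destroys the registers holding its address arguments. A Go sketch of the code shape LoweredMoveLoop expands to (illustrative, not the emitted assembly):

    // copyByWords advances both pointers through the buffers in fixed-size
    // chunks, which is why the address registers cannot be assumed to
    // survive the op.
    func copyByWords(dst, src []byte) {
        for len(dst) >= 8 {
            copy(dst[:8], src[:8])
            dst, src = dst[8:], src[8:] // the address registers move: args clobbered
        }
        copy(dst, src) // tail of fewer than 8 bytes
    }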

View file

@@ -544,14 +544,6 @@
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVFstore [off1+int32(off2)] {sym} ptr val mem)
(MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVDstore [off1+int32(off2)] {sym} ptr val mem)
-(MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
-&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
-(MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
-&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
-(MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
-&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
-(MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
-&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVVstorezero [off1+int32(off2)] {sym} ptr mem)

(MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
@@ -614,28 +606,6 @@
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
-&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
-(MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
-&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
-(MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
-&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
-(MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-(MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
-&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
-(MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
-
-// store zero
-(MOVBstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
-(MOVHstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
-(MOVWstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
-(MOVVstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVVstorezero [off] {sym} ptr mem)

// don't extend after proper load
(MOVBreg x:(MOVBload _ _)) => (MOVVreg x)

View file

@ -29,7 +29,7 @@ import "strings"
// so that regmask stays within int64 // so that regmask stays within int64
// Be careful when hand coding regmasks. // Be careful when hand coding regmasks.
var regNamesMIPS64 = []string{ var regNamesMIPS64 = []string{
"R0", // constant 0 "ZERO", // constant 0
"R1", "R1",
"R2", "R2",
"R3", "R3",
@@ -137,16 +137,17 @@ func init() {
hi = buildReg("HI")
callerSave = gp | fp | lo | hi | buildReg("g") // runtime.setg (and anything calling it) may clobber g
first16 = buildReg("R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16")
+rz = buildReg("ZERO")
)

// Common regInfo
var (
gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}}
-gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}}
+gp21 = regInfo{inputs: []regMask{gpg, gpg | rz}, outputs: []regMask{gp}}
gp2hilo = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{hi, lo}}
gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}
-gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}}
+gpstore = regInfo{inputs: []regMask{gpspsbg, gpg | rz}}
gpstore0 = regInfo{inputs: []regMask{gpspsbg}}
gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}}
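A regMask is one bit per entry in the register-name list, so admitting the zero register as an operand is just an extra bit or-ed into the mask, as `gpg | rz` does above. A sketch of the mask arithmetic, with illustrative bit positions only:

    const (
        maskZERO uint64 = 1 << 0 // "ZERO" is entry 0 in regNamesMIPS64
        maskR1   uint64 = 1 << 1
    )

    // allowZero widens a mask so the operand may be satisfied by the
    // hardwired zero register instead of a materialized constant.
    func allowZero(mask uint64) uint64 { return mask | maskZERO }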
@@ -242,10 +243,7 @@ func init() {
{name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
{name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
-{name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem.
-{name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem.
-{name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of zero to arg0 + auxInt + aux. arg1=mem.
-{name: "MOVVstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+{name: "ZERO", zeroWidth: true, fixedReg: true},

// moves (no conversion)
{name: "MOVWfpgp", argLength: 1, reg: fpgp, asm: "MOVW"}, // move float32 to int32 (no conversion). MIPS64 will perform sign-extend to 64-bit by default

View file

@@ -1,7 +1,6 @@
-// Copyright 2022 The Go Authors. All rights reserved.
+// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

-package runtime
-
-func osArchInit() {}
+// use zero register
+(MOVVconst [0]) => (ZERO)

View file

@@ -373,36 +373,14 @@
(MOVHstore [4] ptr (MOVDconst [0])
(MOVHstore [2] ptr (MOVDconst [0])
(MOVHstore ptr (MOVDconst [0]) mem)))
-(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
-(MOVWstore [8] ptr (MOVDconst [0])
-(MOVWstore [4] ptr (MOVDconst [0])
-(MOVWstore ptr (MOVDconst [0]) mem)))
-(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 =>
-(MOVDstore [8] ptr (MOVDconst [0])
-(MOVDstore ptr (MOVDconst [0]) mem))
-(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 =>
-(MOVDstore [16] ptr (MOVDconst [0])
-(MOVDstore [8] ptr (MOVDconst [0])
-(MOVDstore ptr (MOVDconst [0]) mem)))
-(Zero [32] {t} ptr mem) && t.Alignment()%8 == 0 =>
-(MOVDstore [24] ptr (MOVDconst [0])
-(MOVDstore [16] ptr (MOVDconst [0])
-(MOVDstore [8] ptr (MOVDconst [0])
-(MOVDstore ptr (MOVDconst [0]) mem))))
-// Medium 8-aligned zeroing uses a Duff's device
-// 8 and 128 are magic constants, see runtime/mkduff.go
-(Zero [s] {t} ptr mem)
-&& s%8 == 0 && s <= 8*128
-&& t.Alignment()%8 == 0 =>
-(DUFFZERO [8 * (128 - s/8)] ptr mem)
+// Unroll zeroing in medium size (at most 192 bytes i.e. 3 cachelines)
+(Zero [s] {t} ptr mem) && s <= 24*moveSize(t.Alignment(), config) =>
+(LoweredZero [makeValAndOff(int32(s),int32(t.Alignment()))] ptr mem)
// Generic zeroing uses a loop
-(Zero [s] {t} ptr mem) =>
-(LoweredZero [t.Alignment()]
-ptr
-(ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.Alignment(), config)]))
-mem)
+(Zero [s] {t} ptr mem) && s > 24*moveSize(t.Alignment(), config) =>
+(LoweredZeroLoop [makeValAndOff(int32(s),int32(t.Alignment()))] ptr mem)

// Checks
(IsNonNil ...) => (SNEZ ...)
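The cutover constant is easier to see spelled out: moveSize reports the widest store width usable at a given alignment, so with 8-byte alignment the unrolled LoweredZero covers up to 24*8 = 192 bytes, three 64-byte cache lines. A sketch of the predicate, assuming that simplified moveSize behavior:

    // unrollZero reports whether the unrolled form applies (sketch; the
    // real moveSize helper also consults the arch config).
    func unrollZero(s, align int64) bool {
        moveSize := align
        if moveSize > 8 {
            moveSize = 8
        }
        return s <= 24*moveSize
    }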
@@ -464,37 +442,16 @@
(MOVHstore [4] dst (MOVHload [4] src mem)
(MOVHstore [2] dst (MOVHload [2] src mem)
(MOVHstore dst (MOVHload src mem) mem)))
-(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
-(MOVWstore [8] dst (MOVWload [8] src mem)
-(MOVWstore [4] dst (MOVWload [4] src mem)
-(MOVWstore dst (MOVWload src mem) mem)))
-(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 =>
-(MOVDstore [8] dst (MOVDload [8] src mem)
-(MOVDstore dst (MOVDload src mem) mem))
-(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 =>
-(MOVDstore [16] dst (MOVDload [16] src mem)
-(MOVDstore [8] dst (MOVDload [8] src mem)
-(MOVDstore dst (MOVDload src mem) mem)))
-(Move [32] {t} dst src mem) && t.Alignment()%8 == 0 =>
-(MOVDstore [24] dst (MOVDload [24] src mem)
-(MOVDstore [16] dst (MOVDload [16] src mem)
-(MOVDstore [8] dst (MOVDload [8] src mem)
-(MOVDstore dst (MOVDload src mem) mem))))
-// Medium 8-aligned move uses a Duff's device
-// 16 and 128 are magic constants, see runtime/mkduff.go
-(Move [s] {t} dst src mem)
-&& s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0
-&& logLargeCopy(v, s) =>
-(DUFFCOPY [16 * (128 - s/8)] dst src mem)
+// Generic move
+(Move [s] {t} dst src mem) && s > 0 && s <= 3*8*moveSize(t.Alignment(), config)
+&& logLargeCopy(v, s) =>
+(LoweredMove [makeValAndOff(int32(s),int32(t.Alignment()))] dst src mem)
// Generic move uses a loop
-(Move [s] {t} dst src mem) && (s <= 16 || logLargeCopy(v, s)) =>
-(LoweredMove [t.Alignment()]
-dst
-src
-(ADDI <src.Type> [s-moveSize(t.Alignment(), config)] src)
-mem)
+(Move [s] {t} dst src mem) && s > 3*8*moveSize(t.Alignment(), config)
+&& logLargeCopy(v, s) =>
+(LoweredMoveLoop [makeValAndOff(int32(s),int32(t.Alignment()))] dst src mem)

// Boolean ops; 0=false, 1=true
(AndB ...) => (AND ...)
@@ -716,6 +673,8 @@
(MOVWUreg <t> x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWUload <t> [off] {sym} ptr mem)

// Replace load from same location as preceding store with copy.
+(MOV(D|W|H|B)load [off] {sym} ptr1 (MOV(D|W|H|B)store [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOV(D|W|H|B)reg x)
+(MOV(W|H|B)Uload [off] {sym} ptr1 (MOV(W|H|B)store [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOV(W|H|B)Ureg x)
(MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVXD x)
(FMOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVDX x)
(MOVWload [off] {sym} ptr1 (FMOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVXS x)
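This is store-to-load forwarding: when a load reads the address a store just wrote, the stored value is reused directly, modulo the sign or zero extension the sized load would apply. In ordinary Go terms:

    // storeThenLoad shows the pattern the rules recognize: the compiler
    // substitutes x for the reload of *p instead of touching memory again.
    func storeThenLoad(p *int32, x int32) int32 {
        *p = x
        return *p // becomes: return x
    }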

View file

@@ -117,6 +117,7 @@ func init() {
regCtxt := regNamed["X26"]
callerSave := gpMask | fpMask | regNamed["g"]
+r5toR6 := regNamed["X5"] | regNamed["X6"]

var (
gpstore = regInfo{inputs: []regMask{gpspsbMask, gpspMask, 0}} // SB in first input so we can load from a global, but not in second to avoid using SB as a temporary register
@@ -277,89 +278,90 @@ func init() {
{name: "CALLclosure", argLength: -1, reg: callClosure, aux: "CallOff", call: true}, // call function via closure. arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: -1, reg: callInter, aux: "CallOff", call: true}, // call fn by pointer. arg0=codeptr, last arg=mem, auxint=argsize, returns mem

-// duffzero
-// arg0 = address of memory to zero (in X25, changed as side effect)
-// arg1 = mem
-// auxint = offset into duffzero code to start executing
-// X1 (link register) changed because of function call
-// returns mem
-{
-name: "DUFFZERO",
-aux: "Int64",
-argLength: 2,
-reg: regInfo{
-inputs: []regMask{regNamed["X25"]},
-clobbers: regNamed["X1"] | regNamed["X25"],
-},
-typ: "Mem",
-faultOnNilArg0: true,
-},
-
-// duffcopy
-// arg0 = address of dst memory (in X25, changed as side effect)
-// arg1 = address of src memory (in X24, changed as side effect)
-// arg2 = mem
-// auxint = offset into duffcopy code to start executing
-// X1 (link register) changed because of function call
-// returns mem
-{
-name: "DUFFCOPY",
-aux: "Int64",
-argLength: 3,
-reg: regInfo{
-inputs: []regMask{regNamed["X25"], regNamed["X24"]},
-clobbers: regNamed["X1"] | regNamed["X24"] | regNamed["X25"],
-},
-typ: "Mem",
-faultOnNilArg0: true,
-faultOnNilArg1: true,
-},
-
// Generic moves and zeros
-
-// general unaligned zeroing
-// arg0 = address of memory to zero (in X5, changed as side effect)
-// arg1 = address of the last element to zero (inclusive)
-// arg2 = mem
-// auxint = element size
-// returns mem
-// mov ZERO, (X5)
-// ADD $sz, X5
-// BGEU Rarg1, X5, -2(PC)
-{
-name: "LoweredZero",
-aux: "Int64",
-argLength: 3,
-reg: regInfo{
-inputs: []regMask{regNamed["X5"], gpMask},
-clobbers: regNamed["X5"],
-},
-typ: "Mem",
-faultOnNilArg0: true,
-},
+
+// general unrolled zeroing
+// arg0 = address of memory to zero
+// arg1 = mem
+// auxint = element size and type alignment
+// returns mem
+// mov ZERO, (OFFSET)(Rarg0)
+{
+name: "LoweredZero",
+aux: "SymValAndOff",
+typ: "Mem",
+argLength: 2,
+symEffect: "Write",
+faultOnNilArg0: true,
+reg: regInfo{
+inputs: []regMask{gpMask},
+},
+},
+
+// general unaligned zeroing
+// arg0 = address of memory to zero (clobber)
+// arg1 = mem
+// auxint = element size and type alignment
+// returns mem
+{
+name: "LoweredZeroLoop",
+aux: "SymValAndOff",
+typ: "Mem",
+argLength: 2,
+symEffect: "Write",
+needIntTemp: true,
+faultOnNilArg0: true,
+reg: regInfo{
+inputs: []regMask{gpMask},
+clobbersArg0: true,
+},
+},

// general unaligned move
-// arg0 = address of dst memory (in X5, changed as side effect)
-// arg1 = address of src memory (in X6, changed as side effect)
-// arg2 = address of the last element of src (can't be X7 as we clobber it before using arg2)
-// arg3 = mem
-// auxint = alignment
-// clobbers X7 as a tmp register.
-// returns mem
-// mov (X6), X7
-// mov X7, (X5)
-// ADD $sz, X5
-// ADD $sz, X6
-// BGEU Rarg2, X5, -4(PC)
+// arg0 = address of dst memory (clobber)
+// arg1 = address of src memory (clobber)
+// arg2 = mem
+// auxint = size and type alignment
+// returns mem
+// mov (offset)(Rarg1), TMP
+// mov TMP, (offset)(Rarg0)
{
-name: "LoweredMove",
-aux: "Int64",
-argLength: 4,
+name: "LoweredMove",
+aux: "SymValAndOff",
+symEffect: "Write",
+argLength: 3,
reg: regInfo{
-inputs: []regMask{regNamed["X5"], regNamed["X6"], gpMask &^ regNamed["X7"]},
-clobbers: regNamed["X5"] | regNamed["X6"] | regNamed["X7"],
+inputs: []regMask{gpMask &^ regNamed["X5"], gpMask &^ regNamed["X5"]},
+clobbers: regNamed["X5"],
},
typ: "Mem",
faultOnNilArg0: true,
faultOnNilArg1: true,
},
+
+// general unaligned move
+// arg0 = address of dst memory (clobber)
+// arg1 = address of src memory (clobber)
+// arg2 = mem
+// auxint = alignment
+// returns mem
+// ADD $sz, X6
+// loop:
+// mov (Rarg1), X5
+// mov X5, (Rarg0)
+// ...rest 7 mov...
+// ADD $sz, Rarg0
+// ADD $sz, Rarg1
+// BNE X6, Rarg1, loop
+{
+name: "LoweredMoveLoop",
+aux: "SymValAndOff",
+argLength: 3,
+symEffect: "Write",
+reg: regInfo{
+inputs: []regMask{gpMask &^ r5toR6, gpMask &^ r5toR6},
+clobbers: r5toR6,
+clobbersArg0: true,
+clobbersArg1: true,
+},
+typ: "Mem",
+faultOnNilArg0: true,
+faultOnNilArg1: true,
+},
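The new riscv64 ops carry both a size and an alignment in a single auxInt by packing two 32-bit halves, which is what the rules' makeValAndOff calls produce. A sketch of the packing (the real ValAndOff type lives in cmd/compile/internal/ssa and additionally validates its inputs):

    // Value in the high 32 bits, offset in the low 32 bits.
    func makeValAndOff(val, off int32) int64 {
        return int64(val)<<32 | int64(uint32(off))
    }

    func valOf(vo int64) int32 { return int32(vo >> 32) }
    func offOf(vo int64) int32 { return int32(vo) }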

View file

@@ -2088,7 +2088,7 @@
(NilCheck ptr:(NilCheck _ _) _ ) => ptr

// for late-expanded calls, recognize memequal applied to a single constant byte
-// Support is limited by 1, 2, 4, 8 byte sizes
+// Support is limited by [1-8] byte sizes
(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [1]) mem)
&& isSameCall(callAux, "runtime.memequal")
&& symIsRO(scon)
@@ -2135,6 +2135,118 @@
&& canLoadUnaligned(config) && config.PtrSize == 8
=> (MakeResult (Eq64 (Load <typ.Int64> sptr mem) (Const64 <typ.Int64> [int64(read64(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [3]) mem)
&& isSameCall(callAux, "runtime.memequal")
&& symIsRO(scon)
&& canLoadUnaligned(config) =>
(MakeResult
(Eq32
(Or32 <typ.Int32>
(ZeroExt16to32 <typ.Int32> (Load <typ.Int16> sptr mem))
(Lsh32x32 <typ.Int32>
(ZeroExt8to32 <typ.Int32> (Load <typ.Int8> (OffPtr <typ.BytePtr> [2] sptr) mem))
(Const32 <typ.Int32> [16])))
(Const32 <typ.Int32> [int32(uint32(read16(scon,0,config.ctxt.Arch.ByteOrder))|(uint32(read8(scon,2))<<16))]))
mem)
(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [3]) mem)
&& isSameCall(callAux, "runtime.memequal")
&& symIsRO(scon)
&& canLoadUnaligned(config) =>
(MakeResult
(Eq32
(Or32 <typ.Int32>
(ZeroExt16to32 <typ.Int32> (Load <typ.Int16> sptr mem))
(Lsh32x32 <typ.Int32>
(ZeroExt8to32 <typ.Int32> (Load <typ.Int8> (OffPtr <typ.BytePtr> [2] sptr) mem))
(Const32 <typ.Int32> [16])))
(Const32 <typ.Int32> [int32(uint32(read16(scon,0,config.ctxt.Arch.ByteOrder))|(uint32(read8(scon,2))<<16))]))
mem)
(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [5]) mem)
&& isSameCall(callAux, "runtime.memequal")
&& symIsRO(scon)
&& canLoadUnaligned(config) && config.PtrSize == 8 =>
(MakeResult
(Eq64
(Or64 <typ.Int64>
(ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem))
(Lsh64x64 <typ.Int64>
(ZeroExt8to64 <typ.Int64> (Load <typ.Int8> (OffPtr <typ.BytePtr> [4] sptr) mem))
(Const64 <typ.Int64> [32])))
(Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read8(scon,4))<<32))]))
mem)
(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [5]) mem)
&& isSameCall(callAux, "runtime.memequal")
&& symIsRO(scon)
&& canLoadUnaligned(config) && config.PtrSize == 8 =>
(MakeResult
(Eq64
(Or64 <typ.Int64>
(ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem))
(Lsh64x64 <typ.Int64>
(ZeroExt8to64 <typ.Int64> (Load <typ.Int8> (OffPtr <typ.BytePtr> [4] sptr) mem))
(Const64 <typ.Int64> [32])))
(Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read8(scon,4))<<32))]))
mem)
(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [6]) mem)
&& isSameCall(callAux, "runtime.memequal")
&& symIsRO(scon)
&& canLoadUnaligned(config) && config.PtrSize == 8 =>
(MakeResult
(Eq64
(Or64 <typ.Int64>
(ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem))
(Lsh64x64 <typ.Int64>
(ZeroExt16to64 <typ.Int64> (Load <typ.Int16> (OffPtr <typ.BytePtr> [4] sptr) mem))
(Const64 <typ.Int64> [32])))
(Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read16(scon,4,config.ctxt.Arch.ByteOrder))<<32))]))
mem)
(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [6]) mem)
&& isSameCall(callAux, "runtime.memequal")
&& symIsRO(scon)
&& canLoadUnaligned(config) && config.PtrSize == 8 =>
(MakeResult
(Eq64
(Or64 <typ.Int64>
(ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem))
(Lsh64x64 <typ.Int64>
(ZeroExt16to64 <typ.Int64> (Load <typ.Int16> (OffPtr <typ.BytePtr> [4] sptr) mem))
(Const64 <typ.Int64> [32])))
(Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read16(scon,4,config.ctxt.Arch.ByteOrder))<<32))]))
mem)
(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [7]) mem)
&& isSameCall(callAux, "runtime.memequal")
&& symIsRO(scon)
&& canLoadUnaligned(config) && config.PtrSize == 8 =>
(MakeResult
(Eq64
(Or64 <typ.Int64>
(ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem))
(Lsh64x64 <typ.Int64>
(ZeroExt32to64 <typ.Int64> (Load <typ.Int32> (OffPtr <typ.BytePtr> [3] sptr) mem))
(Const64 <typ.Int64> [32])))
(Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read32(scon,3,config.ctxt.Arch.ByteOrder))<<32))]))
mem)
(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [7]) mem)
&& isSameCall(callAux, "runtime.memequal")
&& symIsRO(scon)
&& canLoadUnaligned(config) && config.PtrSize == 8 =>
(MakeResult
(Eq64
(Or64 <typ.Int64>
(ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem))
(Lsh64x64 <typ.Int64>
(ZeroExt32to64 <typ.Int64> (Load <typ.Int32> (OffPtr <typ.BytePtr> [3] sptr) mem))
(Const64 <typ.Int64> [32])))
(Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read32(scon,3,config.ctxt.Arch.ByteOrder))<<32))]))
mem)
(StaticLECall {callAux} _ _ (Const64 [0]) mem)
&& isSameCall(callAux, "runtime.memequal")
=> (MakeResult (ConstBool <typ.Bool> [true]) mem)
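The odd sizes all use the same overlapping-load trick: two loads whose ranges together cover the buffer, combined into one word, so a single comparison decides equality. The size-7 case spelled out in Go (helper name illustrative):

    import "encoding/binary"

    // equal7 compares seven bytes with two 4-byte loads at offsets 0 and 3;
    // byte 3 is read twice, which is harmless since both sides overlap the
    // same way.
    func equal7(p, q *[7]byte) bool {
        x := uint64(binary.LittleEndian.Uint32(p[0:4])) | uint64(binary.LittleEndian.Uint32(p[3:7]))<<32
        y := uint64(binary.LittleEndian.Uint32(q[0:4])) | uint64(binary.LittleEndian.Uint32(q[3:7]))<<32
        return x == y
    }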
@@ -2761,21 +2873,15 @@
(RotateLeft(64|32|16|8) (RotateLeft(64|32|16|8) x c) d) && c.Type.Size() == 2 && d.Type.Size() == 2 => (RotateLeft(64|32|16|8) x (Add16 <c.Type> c d))
(RotateLeft(64|32|16|8) (RotateLeft(64|32|16|8) x c) d) && c.Type.Size() == 1 && d.Type.Size() == 1 => (RotateLeft(64|32|16|8) x (Add8 <c.Type> c d))

-// Loading constant values from dictionaries and itabs.
-(Load <typ.BytePtr> (OffPtr [off] (Addr {s} sb) ) _) && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb)
-(Load <typ.BytePtr> (OffPtr [off] (Convert (Addr {s} sb) _) ) _) && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb)
-(Load <typ.BytePtr> (OffPtr [off] (ITab (IMake (Addr {s} sb) _))) _) && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb)
-(Load <typ.BytePtr> (OffPtr [off] (ITab (IMake (Convert (Addr {s} sb) _) _))) _) && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb)
-(Load <typ.Uintptr> (OffPtr [off] (Addr {s} sb) ) _) && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb)
-(Load <typ.Uintptr> (OffPtr [off] (Convert (Addr {s} sb) _) ) _) && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb)
-(Load <typ.Uintptr> (OffPtr [off] (ITab (IMake (Addr {s} sb) _))) _) && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb)
-(Load <typ.Uintptr> (OffPtr [off] (ITab (IMake (Convert (Addr {s} sb) _) _))) _) && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb)
-// Loading constant values from runtime._type.hash.
-(Load <t> (OffPtr [off] (Addr {sym} _) ) _) && t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off) => (Const32 [fixed32(config, sym, off)])
-(Load <t> (OffPtr [off] (Convert (Addr {sym} _) _) ) _) && t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off) => (Const32 [fixed32(config, sym, off)])
-(Load <t> (OffPtr [off] (ITab (IMake (Addr {sym} _) _))) _) && t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off) => (Const32 [fixed32(config, sym, off)])
-(Load <t> (OffPtr [off] (ITab (IMake (Convert (Addr {sym} _) _) _))) _) && t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off) => (Const32 [fixed32(config, sym, off)])
+// Loading fixed addresses and constants.
+(Load (Addr {s} sb) _) && isFixedLoad(v, s, 0) => rewriteFixedLoad(v, s, sb, 0)
+(Load (Convert (Addr {s} sb) _) _) && isFixedLoad(v, s, 0) => rewriteFixedLoad(v, s, sb, 0)
+(Load (ITab (IMake (Addr {s} sb) _)) _) && isFixedLoad(v, s, 0) => rewriteFixedLoad(v, s, sb, 0)
+(Load (ITab (IMake (Convert (Addr {s} sb) _) _)) _) && isFixedLoad(v, s, 0) => rewriteFixedLoad(v, s, sb, 0)
+(Load (OffPtr [off] (Addr {s} sb) ) _) && isFixedLoad(v, s, off) => rewriteFixedLoad(v, s, sb, off)
+(Load (OffPtr [off] (Convert (Addr {s} sb) _) ) _) && isFixedLoad(v, s, off) => rewriteFixedLoad(v, s, sb, off)
+(Load (OffPtr [off] (ITab (IMake (Addr {s} sb) _))) _) && isFixedLoad(v, s, off) => rewriteFixedLoad(v, s, sb, off)
+(Load (OffPtr [off] (ITab (IMake (Convert (Addr {s} sb) _) _))) _) && isFixedLoad(v, s, off) => rewriteFixedLoad(v, s, sb, off)

// Calling cmpstring a second time with the same arguments in the
// same memory state can reuse the results of the first call.
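The idea behind a "fixed load": some words of a static type descriptor or itab never change after linking, so a load at a known offset from a known symbol can fold to the constant itself. A sketch with a hypothetical layout (not the real abi package):

    type typeDescriptor struct {
        size uintptr // offset 0
        hash uint32  // offset 8 on 64-bit targets
    }

    // If the compiler can prove ptr refers to one specific static
    // descriptor, this load becomes a Const32 of that descriptor's hash.
    func hashOf(ptr *typeDescriptor) uint32 { return ptr.hash }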

View file

@@ -1452,7 +1452,7 @@ func opHasAuxInt(op opData) bool {
switch op.aux {
case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "UInt8", "Float32", "Float64",
"SymOff", "CallOff", "SymValAndOff", "TypSize", "ARM64BitField", "FlagConstant", "CCop",
-"PanicBoundsC", "PanicBoundsCC":
+"PanicBoundsC", "PanicBoundsCC", "ARM64ConditionalParams":
return true
}
return false
@@ -1860,6 +1860,8 @@ func (op opData) auxIntType() string {
return "flagConstant"
case "ARM64BitField":
return "arm64BitField"
+case "ARM64ConditionalParams":
+return "arm64ConditionalParams"
case "PanicBoundsC", "PanicBoundsCC":
return "int64"
default:

View file

@@ -145,7 +145,7 @@ func checkFunc(f *Func) {
f.Fatalf("bad int32 AuxInt value for %v", v)
}
canHaveAuxInt = true
-case auxInt64, auxARM64BitField:
+case auxInt64, auxARM64BitField, auxARM64ConditionalParams:
canHaveAuxInt = true
case auxInt128:
// AuxInt must be zero, so leave canHaveAuxInt set to false.

View file

@@ -279,6 +279,8 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize, softfloat boo
c.RegSize = 8
c.lowerBlock = rewriteBlockMIPS64
c.lowerValue = rewriteValueMIPS64
+c.lateLowerBlock = rewriteBlockMIPS64latelower
+c.lateLowerValue = rewriteValueMIPS64latelower
c.registers = registersMIPS64[:]
c.gpRegMask = gpRegMaskMIPS64
c.fpRegMask = fpRegMaskMIPS64

View file

@@ -181,34 +181,45 @@ func cse(f *Func) {
for _, e := range partition {
slices.SortFunc(e, func(v, w *Value) int {
c := cmp.Compare(sdom.domorder(v.Block), sdom.domorder(w.Block))
-if v.Op != OpLocalAddr || c != 0 {
+if c != 0 {
return c
}
-// compare the memory args for OpLocalAddrs in the same block
-vm := v.Args[1]
-wm := w.Args[1]
-if vm == wm {
-return 0
-}
-// if the two OpLocalAddrs are in the same block, and one's memory
-// arg also in the same block, but the other one's memory arg not,
-// the latter must be in an ancestor block
-if vm.Block != v.Block {
-return -1
-}
-if wm.Block != w.Block {
-return +1
-}
-// use store order if the memory args are in the same block
-vs := storeOrdering(vm, o)
-ws := storeOrdering(wm, o)
-if vs <= 0 {
-f.Fatalf("unable to determine the order of %s", vm.LongString())
-}
-if ws <= 0 {
-f.Fatalf("unable to determine the order of %s", wm.LongString())
-}
-return cmp.Compare(vs, ws)
+if v.Op == OpLocalAddr {
+// compare the memory args for OpLocalAddrs in the same block
+vm := v.Args[1]
+wm := w.Args[1]
+if vm == wm {
+return 0
+}
+// if the two OpLocalAddrs are in the same block, and one's memory
+// arg also in the same block, but the other one's memory arg not,
+// the latter must be in an ancestor block
+if vm.Block != v.Block {
+return -1
+}
+if wm.Block != w.Block {
+return +1
+}
+// use store order if the memory args are in the same block
+vs := storeOrdering(vm, o)
+ws := storeOrdering(wm, o)
+if vs <= 0 {
+f.Fatalf("unable to determine the order of %s", vm.LongString())
+}
+if ws <= 0 {
+f.Fatalf("unable to determine the order of %s", wm.LongString())
+}
+return cmp.Compare(vs, ws)
+}
+vStmt := v.Pos.IsStmt() == src.PosIsStmt
+wStmt := w.Pos.IsStmt() == src.PosIsStmt
+if vStmt != wStmt {
+if vStmt {
+return -1
+}
+return +1
+}
+return 0
})

for i := 0; i < len(e)-1; i++ {
View file

@@ -375,11 +375,12 @@ const (
auxPanicBoundsCC // two constants for a bounds failure

// architecture specific aux types
auxARM64BitField // aux is an arm64 bitfield lsb and width packed into auxInt
+auxARM64ConditionalParams // aux is a structure containing the condition to evaluate, the fallback NZCV flags, and an optional constant operand with an indicator of its use
auxS390XRotateParams // aux is a s390x rotate parameters object encoding start bit, end bit and rotate amount
auxS390XCCMask // aux is a s390x 4-bit condition code mask
auxS390XCCMaskInt8 // aux is a s390x 4-bit condition code mask, auxInt is an int8 immediate
auxS390XCCMaskUint8 // aux is a s390x 4-bit condition code mask, auxInt is a uint8 immediate
)

// A SymEffect describes the effect that an SSA Value has on the variable
@@ -534,3 +535,11 @@ func (b BoundsKind) Code() (rtabi.BoundsErrorCode, bool) {
// width+lsb<64 for 64-bit variant, width+lsb<32 for 32-bit variant.
// the meaning of width and lsb are instruction-dependent.
type arm64BitField int16
+
+// arm64ConditionalParams is the Go type of the ARM64ConditionalParams auxInt.
+type arm64ConditionalParams struct {
+cond Op // Condition code to evaluate
+nzcv uint8 // Fallback NZCV flags value when condition is false
+constValue uint8 // Immediate value for constant comparisons
+ind bool // Constant comparison indicator
+}
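For orientation, ARM64's conditional compare sets flags conditionally: if the incoming condition holds, the flags become those of an ordinary compare; otherwise they take the immediate NZCV fallback carried in the aux. A Go model of the semantics (the flag computation is a sketch showing N and Z only; hardware also produces C and V):

    func ccmp(condHolds bool, x, y int64, nzcv uint8) uint8 {
        if !condHolds {
            return nzcv // fallback flags
        }
        var flags uint8
        if x == y {
            flags |= 0b0100 // Z
        }
        if x-y < 0 {
            flags |= 0b1000 // N (ignoring overflow in this sketch)
        }
        return flags
    }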

View file

@@ -3563,6 +3563,14 @@ const (
OpARM64CSINV
OpARM64CSNEG
OpARM64CSETM
OpARM64CCMP
OpARM64CCMN
OpARM64CCMPconst
OpARM64CCMNconst
OpARM64CCMPW
OpARM64CCMNW
OpARM64CCMPWconst
OpARM64CCMNWconst
OpARM64CALLstatic
OpARM64CALLtail
OpARM64CALLclosure
@@ -3660,11 +3668,14 @@ const (
OpLOONG64VPCNT16
OpLOONG64ADDV
OpLOONG64ADDVconst
OpLOONG64ADDV16const
OpLOONG64SUBV
OpLOONG64SUBVconst
OpLOONG64MULV
OpLOONG64MULHV
OpLOONG64MULHVU
OpLOONG64MULH
OpLOONG64MULHU
OpLOONG64DIVV
OpLOONG64DIVVU
OpLOONG64REMV
@@ -3792,11 +3803,10 @@ const (
OpLOONG64CALLtail
OpLOONG64CALLclosure
OpLOONG64CALLinter
-OpLOONG64DUFFZERO
OpLOONG64LoweredZero
-OpLOONG64DUFFCOPY
OpLOONG64LoweredZeroLoop
OpLOONG64LoweredMove
+OpLOONG64LoweredMoveLoop
OpLOONG64LoweredAtomicLoad8
OpLOONG64LoweredAtomicLoad32
OpLOONG64LoweredAtomicLoad64
@@ -4020,10 +4030,7 @@ const (
OpMIPS64MOVVstore
OpMIPS64MOVFstore
OpMIPS64MOVDstore
-OpMIPS64MOVBstorezero
-OpMIPS64MOVHstorezero
-OpMIPS64MOVWstorezero
-OpMIPS64MOVVstorezero
+OpMIPS64ZERO
OpMIPS64MOVWfpgp
OpMIPS64MOVWgpfp
OpMIPS64MOVVfpgp
@@ -4429,10 +4436,10 @@ const (
OpRISCV64CALLtail
OpRISCV64CALLclosure
OpRISCV64CALLinter
-OpRISCV64DUFFZERO
-OpRISCV64DUFFCOPY
OpRISCV64LoweredZero
+OpRISCV64LoweredZeroLoop
OpRISCV64LoweredMove
+OpRISCV64LoweredMoveLoop
OpRISCV64LoweredAtomicLoad8
OpRISCV64LoweredAtomicLoad32
OpRISCV64LoweredAtomicLoad64
@@ -54564,6 +54571,98 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "CCMP",
auxType: auxARM64ConditionalParams,
argLen: 3,
asm: arm64.ACCMP,
reg: regInfo{
inputs: []inputInfo{
{0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
{1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
},
},
},
{
name: "CCMN",
auxType: auxARM64ConditionalParams,
argLen: 3,
asm: arm64.ACCMN,
reg: regInfo{
inputs: []inputInfo{
{0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
{1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
},
},
},
{
name: "CCMPconst",
auxType: auxARM64ConditionalParams,
argLen: 2,
asm: arm64.ACCMP,
reg: regInfo{
inputs: []inputInfo{
{0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
},
},
},
{
name: "CCMNconst",
auxType: auxARM64ConditionalParams,
argLen: 2,
asm: arm64.ACCMN,
reg: regInfo{
inputs: []inputInfo{
{0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
},
},
},
{
name: "CCMPW",
auxType: auxARM64ConditionalParams,
argLen: 3,
asm: arm64.ACCMPW,
reg: regInfo{
inputs: []inputInfo{
{0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
{1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
},
},
},
{
name: "CCMNW",
auxType: auxARM64ConditionalParams,
argLen: 3,
asm: arm64.ACCMNW,
reg: regInfo{
inputs: []inputInfo{
{0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
{1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
},
},
},
{
name: "CCMPWconst",
auxType: auxARM64ConditionalParams,
argLen: 2,
asm: arm64.ACCMPW,
reg: regInfo{
inputs: []inputInfo{
{0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
},
},
},
{
name: "CCMNWconst",
auxType: auxARM64ConditionalParams,
argLen: 2,
asm: arm64.ACCMNW,
reg: regInfo{
inputs: []inputInfo{
{0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
},
},
},
{
name: "CALLstatic",
auxType: auxCallOff,
@@ -55803,6 +55902,20 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "ADDV16const",
auxType: auxInt64,
argLen: 1,
asm: loong64.AADDV16,
reg: regInfo{
inputs: []inputInfo{
{0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
{
name: "SUBV",
argLen: 2,
@@ -55876,6 +55989,36 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "MULH",
argLen: 2,
commutative: true,
asm: loong64.AMULH,
reg: regInfo{
inputs: []inputInfo{
{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
{1, 1073741817}, // ZERO R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
{
name: "MULHU",
argLen: 2,
commutative: true,
asm: loong64.AMULHU,
reg: regInfo{
inputs: []inputInfo{
{0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
{1, 1073741817}, // ZERO R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
},
outputs: []outputInfo{
{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
},
},
{
name: "DIVV",
argLen: 2,
@@ -57638,18 +57781,6 @@ var opcodeTable = [...]opInfo{
clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
},
-{
-name: "DUFFZERO",
-auxType: auxInt64,
-argLen: 2,
-faultOnNilArg0: true,
-reg: regInfo{
-inputs: []inputInfo{
-{0, 524288}, // R20
-},
-clobbers: 524290, // R1 R20
-},
-},
{
name: "LoweredZero",
auxType: auxInt64,
@@ -57661,20 +57792,6 @@ var opcodeTable = [...]opInfo{
},
},
},
-{
-name: "DUFFCOPY",
-auxType: auxInt64,
-argLen: 3,
-faultOnNilArg0: true,
-faultOnNilArg1: true,
-reg: regInfo{
-inputs: []inputInfo{
-{0, 1048576}, // R21
-{1, 524288}, // R20
-},
-clobbers: 1572866, // R1 R20 R21
-},
-},
{
name: "LoweredZeroLoop",
auxType: auxInt64,
@@ -57691,16 +57808,31 @@ var opcodeTable = [...]opInfo{
{
name: "LoweredMove",
auxType: auxInt64,
-argLen: 4,
+argLen: 3,
faultOnNilArg0: true,
faultOnNilArg1: true,
reg: regInfo{
inputs: []inputInfo{
-{0, 1048576}, // R21
-{1, 524288}, // R20
-{2, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+{0, 1071120376}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R21 R23 R24 R25 R26 R27 R28 R29 R31
+{1, 1071120376}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R21 R23 R24 R25 R26 R27 R28 R29 R31
},
-clobbers: 1572864, // R20 R21
+clobbers: 524288, // R20
},
},
+{
+name: "LoweredMoveLoop",
+auxType: auxInt64,
+argLen: 3,
+faultOnNilArg0: true,
+faultOnNilArg1: true,
+reg: regInfo{
+inputs: []inputInfo{
+{0, 1070071800}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R23 R24 R25 R26 R27 R28 R29 R31
+{1, 1070071800}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R23 R24 R25 R26 R27 R28 R29 R31
+},
+clobbers: 1572864, // R20 R21
+clobbersArg0: true,
+clobbersArg1: true,
+},
+},
{
@@ -59796,7 +59928,7 @@
reg: regInfo{
inputs: []inputInfo{
{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+{1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
},
outputs: []outputInfo{
{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
@@ -59824,7 +59956,7 @@
reg: regInfo{
inputs: []inputInfo{
{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+{1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
},
outputs: []outputInfo{
{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
@@ -60031,7 +60163,7 @@
reg: regInfo{
inputs: []inputInfo{
{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+{1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
},
outputs: []outputInfo{
{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
@@ -60060,7 +60192,7 @@
reg: regInfo{
inputs: []inputInfo{
{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+{1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
},
outputs: []outputInfo{
{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
@@ -60089,7 +60221,7 @@
reg: regInfo{
inputs: []inputInfo{
{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+{1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
},
outputs: []outputInfo{
{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
@@ -60118,7 +60250,7 @@
reg: regInfo{
inputs: []inputInfo{
{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+{1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
},
outputs: []outputInfo{
{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
@@ -60223,7 +60355,7 @@
reg: regInfo{
inputs: []inputInfo{
{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+{1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
},
outputs: []outputInfo{
{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
@ -60251,7 +60383,7 @@ var opcodeTable = [...]opInfo{
reg: regInfo{ reg: regInfo{
inputs: []inputInfo{ inputs: []inputInfo{
{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
}, },
outputs: []outputInfo{ outputs: []outputInfo{
{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
@ -60279,7 +60411,7 @@ var opcodeTable = [...]opInfo{
reg: regInfo{ reg: regInfo{
inputs: []inputInfo{ inputs: []inputInfo{
{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
}, },
outputs: []outputInfo{ outputs: []outputInfo{
{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
@ -60307,7 +60439,7 @@ var opcodeTable = [...]opInfo{
reg: regInfo{ reg: regInfo{
inputs: []inputInfo{ inputs: []inputInfo{
{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
}, },
outputs: []outputInfo{ outputs: []outputInfo{
{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
@ -60335,7 +60467,7 @@ var opcodeTable = [...]opInfo{
reg: regInfo{ reg: regInfo{
inputs: []inputInfo{ inputs: []inputInfo{
{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
}, },
outputs: []outputInfo{ outputs: []outputInfo{
{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
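Editor's note: the only change in this long run of hunks is the second input mask, 234881022 becoming 234881023, which sets bit 0. With the register table change at the end of this file, bit 0 now names the hardwired ZERO register, so these ops simply admit ZERO as an operand. A minimal sketch of the arithmetic, assuming bit i of a mask corresponds to register number i:

package main

import "fmt"

func main() {
	const oldMask = 234881022 // R1..R22 R24 R25 g R31; bit 0 clear
	const zeroBit = 1 << 0    // assumption: register number 0 is the hardwired ZERO
	fmt.Println(oldMask | zeroBit) // 234881023: the same register set plus ZERO
}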
@ -60627,7 +60759,7 @@ var opcodeTable = [...]opInfo{
asm: mips.AMOVB, asm: mips.AMOVB,
reg: regInfo{ reg: regInfo{
inputs: []inputInfo{ inputs: []inputInfo{
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
}, },
}, },
@ -60641,7 +60773,7 @@ var opcodeTable = [...]opInfo{
asm: mips.AMOVH, asm: mips.AMOVH,
reg: regInfo{ reg: regInfo{
inputs: []inputInfo{ inputs: []inputInfo{
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
}, },
}, },
@ -60655,7 +60787,7 @@ var opcodeTable = [...]opInfo{
asm: mips.AMOVW, asm: mips.AMOVW,
reg: regInfo{ reg: regInfo{
inputs: []inputInfo{ inputs: []inputInfo{
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
}, },
}, },
@ -60669,7 +60801,7 @@ var opcodeTable = [...]opInfo{
asm: mips.AMOVV, asm: mips.AMOVV,
reg: regInfo{ reg: regInfo{
inputs: []inputInfo{ inputs: []inputInfo{
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
}, },
}, },
@ -60703,56 +60835,11 @@ var opcodeTable = [...]opInfo{
}, },
}, },
{ {
name: "MOVBstorezero", name: "ZERO",
auxType: auxSymOff, argLen: 0,
argLen: 2, zeroWidth: true,
faultOnNilArg0: true, fixedReg: true,
symEffect: SymWrite, reg: regInfo{},
asm: mips.AMOVB,
reg: regInfo{
inputs: []inputInfo{
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
},
},
},
{
name: "MOVHstorezero",
auxType: auxSymOff,
argLen: 2,
faultOnNilArg0: true,
symEffect: SymWrite,
asm: mips.AMOVH,
reg: regInfo{
inputs: []inputInfo{
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
},
},
},
{
name: "MOVWstorezero",
auxType: auxSymOff,
argLen: 2,
faultOnNilArg0: true,
symEffect: SymWrite,
asm: mips.AMOVW,
reg: regInfo{
inputs: []inputInfo{
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
},
},
},
{
name: "MOVVstorezero",
auxType: auxSymOff,
argLen: 2,
faultOnNilArg0: true,
symEffect: SymWrite,
asm: mips.AMOVV,
reg: regInfo{
inputs: []inputInfo{
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
},
},
}, },
{ {
name: "MOVWfpgp", name: "MOVWfpgp",
@ -61153,7 +61240,7 @@ var opcodeTable = [...]opInfo{
asm: mips.AAND, asm: mips.AAND,
reg: regInfo{ reg: regInfo{
inputs: []inputInfo{ inputs: []inputInfo{
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
}, },
}, },
@ -61167,7 +61254,7 @@ var opcodeTable = [...]opInfo{
asm: mips.AOR, asm: mips.AOR,
reg: regInfo{ reg: regInfo{
inputs: []inputInfo{ inputs: []inputInfo{
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
}, },
}, },
@ -61218,7 +61305,7 @@ var opcodeTable = [...]opInfo{
hasSideEffects: true, hasSideEffects: true,
reg: regInfo{ reg: regInfo{
inputs: []inputInfo{ inputs: []inputInfo{
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
}, },
}, },
@ -61230,7 +61317,7 @@ var opcodeTable = [...]opInfo{
hasSideEffects: true, hasSideEffects: true,
reg: regInfo{ reg: regInfo{
inputs: []inputInfo{ inputs: []inputInfo{
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
}, },
}, },
@ -61242,7 +61329,7 @@ var opcodeTable = [...]opInfo{
hasSideEffects: true, hasSideEffects: true,
reg: regInfo{ reg: regInfo{
inputs: []inputInfo{ inputs: []inputInfo{
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
}, },
}, },
@ -66181,57 +66268,61 @@ var opcodeTable = [...]opInfo{
}, },
}, },
{ {
name: "DUFFZERO", name: "LoweredZero",
auxType: auxInt64, auxType: auxSymValAndOff,
argLen: 2, argLen: 2,
faultOnNilArg0: true, faultOnNilArg0: true,
symEffect: SymWrite,
reg: regInfo{ reg: regInfo{
inputs: []inputInfo{ inputs: []inputInfo{
{0, 16777216}, // X25 {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
}, },
clobbers: 16777216, // X25
}, },
}, },
{ {
name: "DUFFCOPY", name: "LoweredZeroLoop",
auxType: auxInt64, auxType: auxSymValAndOff,
argLen: 2,
needIntTemp: true,
faultOnNilArg0: true,
symEffect: SymWrite,
reg: regInfo{
inputs: []inputInfo{
{0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
clobbersArg0: true,
},
},
{
name: "LoweredMove",
auxType: auxSymValAndOff,
argLen: 3, argLen: 3,
faultOnNilArg0: true, faultOnNilArg0: true,
faultOnNilArg1: true, faultOnNilArg1: true,
symEffect: SymWrite,
reg: regInfo{ reg: regInfo{
inputs: []inputInfo{ inputs: []inputInfo{
{0, 16777216}, // X25 {0, 1006632928}, // X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
{1, 8388608}, // X24 {1, 1006632928}, // X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
clobbers: 25165824, // X24 X25
},
},
{
name: "LoweredZero",
auxType: auxInt64,
argLen: 3,
faultOnNilArg0: true,
reg: regInfo{
inputs: []inputInfo{
{0, 16}, // X5
{1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
}, },
clobbers: 16, // X5 clobbers: 16, // X5
}, },
}, },
{ {
name: "LoweredMove", name: "LoweredMoveLoop",
auxType: auxInt64, auxType: auxSymValAndOff,
argLen: 4, argLen: 3,
faultOnNilArg0: true, faultOnNilArg0: true,
faultOnNilArg1: true, faultOnNilArg1: true,
symEffect: SymWrite,
reg: regInfo{ reg: regInfo{
inputs: []inputInfo{ inputs: []inputInfo{
{0, 16}, // X5 {0, 1006632896}, // X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
{1, 32}, // X6 {1, 1006632896}, // X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
{2, 1006632880}, // X5 X6 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
}, },
clobbers: 112, // X5 X6 X7 clobbers: 48, // X5 X6
clobbersArg0: true,
clobbersArg1: true,
}, },
}, },
{ {
@ -81864,7 +81955,7 @@ var specialRegMaskMIPS = regMask(105553116266496)
var framepointerRegMIPS = int8(-1) var framepointerRegMIPS = int8(-1)
var linkRegMIPS = int8(28) var linkRegMIPS = int8(28)
var registersMIPS64 = [...]Register{ var registersMIPS64 = [...]Register{
{0, mips.REG_R0, "R0"}, {0, mips.REGZERO, "ZERO"},
{1, mips.REG_R1, "R1"}, {1, mips.REG_R1, "R1"},
{2, mips.REG_R2, "R2"}, {2, mips.REG_R2, "R2"},
{3, mips.REG_R3, "R3"}, {3, mips.REG_R3, "R3"},


@ -1438,7 +1438,7 @@ func (s *regAllocState) regalloc(f *Func) {
case OpSB: case OpSB:
s.assignReg(s.SBReg, v, v) s.assignReg(s.SBReg, v, v)
s.sb = v.ID s.sb = v.ID
case OpARM64ZERO, OpLOONG64ZERO: case OpARM64ZERO, OpLOONG64ZERO, OpMIPS64ZERO:
s.assignReg(s.ZeroIntReg, v, v) s.assignReg(s.ZeroIntReg, v, v)
case OpAMD64Zero128, OpAMD64Zero256, OpAMD64Zero512: case OpAMD64Zero128, OpAMD64Zero256, OpAMD64Zero512:
regspec := s.regspec(v) regspec := s.regspec(v)
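Editor's note: a rough sketch of the special case extended here, with hypothetical stand-ins for the real regAllocState machinery: values of fixed-register pseudo-ops are pinned to their register up front, so the allocator never spends an allocatable register on a zero.

package main

import "fmt"

// fixedRegFor is a hypothetical miniature of the switch above: ops whose
// result is architecturally fixed report the register they are pinned to.
func fixedRegFor(op string) (reg string, ok bool) {
	switch op {
	case "ARM64ZERO", "LOONG64ZERO", "MIPS64ZERO":
		return "zero register", true
	}
	return "", false
}

func main() {
	fmt.Println(fixedRegFor("MIPS64ZERO")) // zero register true
}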


@ -8,6 +8,7 @@ import (
"cmd/compile/internal/base" "cmd/compile/internal/base"
"cmd/compile/internal/logopt" "cmd/compile/internal/logopt"
"cmd/compile/internal/reflectdata" "cmd/compile/internal/reflectdata"
"cmd/compile/internal/rttype"
"cmd/compile/internal/types" "cmd/compile/internal/types"
"cmd/internal/obj" "cmd/internal/obj"
"cmd/internal/obj/s390x" "cmd/internal/obj/s390x"
@ -668,6 +669,17 @@ func auxIntToValAndOff(i int64) ValAndOff {
func auxIntToArm64BitField(i int64) arm64BitField { func auxIntToArm64BitField(i int64) arm64BitField {
return arm64BitField(i) return arm64BitField(i)
} }
func auxIntToArm64ConditionalParams(i int64) arm64ConditionalParams {
var params arm64ConditionalParams
params.cond = Op(i & 0xffff)
i >>= 16
params.nzcv = uint8(i & 0x0f)
i >>= 4
params.constValue = uint8(i & 0x1f)
i >>= 5
params.ind = i == 1
return params
}
func auxIntToFlagConstant(x int64) flagConstant { func auxIntToFlagConstant(x int64) flagConstant {
return flagConstant(x) return flagConstant(x)
} }
@ -709,6 +721,20 @@ func valAndOffToAuxInt(v ValAndOff) int64 {
func arm64BitFieldToAuxInt(v arm64BitField) int64 { func arm64BitFieldToAuxInt(v arm64BitField) int64 {
return int64(v) return int64(v)
} }
func arm64ConditionalParamsToAuxInt(v arm64ConditionalParams) int64 {
if v.cond&^0xffff != 0 {
panic("condition value exceeds 16 bits")
}
var i int64
if v.ind {
i = 1 << 25
}
i |= int64(v.constValue) << 20
i |= int64(v.nzcv) << 16
i |= int64(v.cond)
return i
}
func flagConstantToAuxInt(x flagConstant) int64 { func flagConstantToAuxInt(x flagConstant) int64 {
return int64(x) return int64(x)
} }
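Editor's note: the two helpers above are inverses over a single int64: bits 0-15 carry the condition op, bits 16-19 the NZCV flags, bits 20-24 the optional 5-bit constant, and bit 25 the flag that a constant is present. A standalone round-trip sketch of that layout, with a plain int standing in for the SSA Op type:

package main

import "fmt"

type condParams struct {
	cond       int // stand-in for ssa.Op; the real encoder limits it to 16 bits
	nzcv       uint8
	constValue uint8
	ind        bool
}

func pack(p condParams) int64 {
	var i int64
	if p.ind {
		i = 1 << 25
	}
	i |= int64(p.constValue) << 20
	i |= int64(p.nzcv) << 16
	i |= int64(p.cond)
	return i
}

func unpack(i int64) condParams {
	var p condParams
	p.cond = int(i & 0xffff)
	i >>= 16
	p.nzcv = uint8(i & 0x0f)
	i >>= 4
	p.constValue = uint8(i & 0x1f)
	i >>= 5
	p.ind = i == 1
	return p
}

func main() {
	in := condParams{cond: 42, nzcv: 0b1010, constValue: 17, ind: true}
	fmt.Println(unpack(pack(in)) == in) // true: the encoding round-trips
}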
@ -1898,6 +1924,43 @@ func arm64BFWidth(mask, rshift int64) int64 {
return nto(shiftedMask) return nto(shiftedMask)
} }
// encodes condition code and NZCV flags into auxint.
func arm64ConditionalParamsAuxInt(cond Op, nzcv uint8) arm64ConditionalParams {
if cond < OpARM64Equal || cond > OpARM64GreaterEqualU {
panic("Wrong conditional operation")
}
if nzcv&0x0f != nzcv {
panic("Wrong value of NZCV flag")
}
return arm64ConditionalParams{cond, nzcv, 0, false}
}
// encodes condition code, NZCV flags and constant value into auxint.
func arm64ConditionalParamsAuxIntWithValue(cond Op, nzcv uint8, value uint8) arm64ConditionalParams {
if value&0x1f != value {
panic("Wrong value of constant")
}
params := arm64ConditionalParamsAuxInt(cond, nzcv)
params.constValue = value
params.ind = true
return params
}
// extracts condition code from auxint.
func (condParams arm64ConditionalParams) Cond() Op {
return condParams.cond
}
// extracts NZCV flags from auxint.
func (condParams arm64ConditionalParams) Nzcv() int64 {
return int64(condParams.nzcv)
}
// extracts constant value from auxint if present.
func (condParams arm64ConditionalParams) ConstValue() (int64, bool) {
return int64(condParams.constValue), condParams.ind
}
// registerizable reports whether t is a primitive type that fits in // registerizable reports whether t is a primitive type that fits in
// a register. It assumes float64 values will always fit into registers // a register. It assumes float64 values will always fit into registers
// even if that isn't strictly true. // even if that isn't strictly true.
@ -1981,74 +2044,128 @@ func symIsROZero(sym Sym) bool {
return true return true
} }
// isFixed32 returns true if the int32 at offset off in symbol sym // isFixedLoad returns true if the load can be resolved to a fixed address or constant,
// is known and constant. // and can be rewritten by rewriteFixedLoad.
func isFixed32(c *Config, sym Sym, off int64) bool { func isFixedLoad(v *Value, sym Sym, off int64) bool {
return isFixed(c, sym, off, 4)
}
// isFixed returns true if the range [off,off+size] of the symbol sym
// is known and constant.
func isFixed(c *Config, sym Sym, off, size int64) bool {
lsym := sym.(*obj.LSym) lsym := sym.(*obj.LSym)
if lsym.Extra == nil { if (v.Type.IsPtrShaped() || v.Type.IsUintptr()) && lsym.Type == objabi.SRODATA {
for _, r := range lsym.R {
if (r.Type == objabi.R_ADDR || r.Type == objabi.R_WEAKADDR) && int64(r.Off) == off && r.Add == 0 {
return true
}
}
return false return false
} }
if _, ok := (*lsym.Extra).(*obj.TypeInfo); ok {
if off == 2*c.PtrSize && size == 4 {
return true // type hash field
}
}
return false
}
func fixed32(c *Config, sym Sym, off int64) int32 {
lsym := sym.(*obj.LSym)
if ti, ok := (*lsym.Extra).(*obj.TypeInfo); ok {
if off == 2*c.PtrSize {
return int32(types.TypeHash(ti.Type.(*types.Type)))
}
}
base.Fatalf("fixed32 data not known for %s:%d", sym, off)
return 0
}
// isFixedSym returns true if the contents of sym at the given offset if strings.HasPrefix(lsym.Name, "type:") {
// is known and is the constant address of another symbol. // Type symbols do not contain information about their fields, unlike the cases above.
func isFixedSym(sym Sym, off int64) bool { // Hand-implement field accesses.
lsym := sym.(*obj.LSym) // TODO: can this be replaced with reflectdata.writeType and just use the code above?
switch {
case lsym.Type == objabi.SRODATA: t := (*lsym.Extra).(*obj.TypeInfo).Type.(*types.Type)
// itabs, dictionaries
default: for _, f := range rttype.Type.Fields() {
return false if f.Offset == off && copyCompatibleType(v.Type, f.Type) {
} switch f.Sym.Name {
for _, r := range lsym.R { case "Size_", "PtrBytes", "Hash", "Kind_":
if (r.Type == objabi.R_ADDR || r.Type == objabi.R_WEAKADDR) && int64(r.Off) == off && r.Add == 0 { return true
default:
// fmt.Println("unknown field", f.Sym.Name)
return false
}
}
}
if t.IsPtr() && off == rttype.PtrType.OffsetOf("Elem") {
return true return true
} }
return false
} }
return false return false
} }
func fixedSym(f *Func, sym Sym, off int64) Sym {
// rewriteFixedLoad rewrites a load to a fixed address or constant, if isFixedLoad returns true.
func rewriteFixedLoad(v *Value, sym Sym, sb *Value, off int64) *Value {
b := v.Block
f := b.Func
lsym := sym.(*obj.LSym) lsym := sym.(*obj.LSym)
for _, r := range lsym.R { if (v.Type.IsPtrShaped() || v.Type.IsUintptr()) && lsym.Type == objabi.SRODATA {
if (r.Type == objabi.R_ADDR || r.Type == objabi.R_WEAKADDR) && int64(r.Off) == off { for _, r := range lsym.R {
if strings.HasPrefix(r.Sym.Name, "type:") { if (r.Type == objabi.R_ADDR || r.Type == objabi.R_WEAKADDR) && int64(r.Off) == off && r.Add == 0 {
// In case we're loading a type out of a dictionary, we need to record if strings.HasPrefix(r.Sym.Name, "type:") {
// that the containing function might put that type in an interface. // In case we're loading a type out of a dictionary, we need to record
// That information is currently recorded in relocations in the dictionary, // that the containing function might put that type in an interface.
// but if we perform this load at compile time then the dictionary // That information is currently recorded in relocations in the dictionary,
// might be dead. // but if we perform this load at compile time then the dictionary
reflectdata.MarkTypeSymUsedInInterface(r.Sym, f.fe.Func().Linksym()) // might be dead.
} else if strings.HasPrefix(r.Sym.Name, "go:itab") { reflectdata.MarkTypeSymUsedInInterface(r.Sym, f.fe.Func().Linksym())
// Same, but if we're using an itab we need to record that the } else if strings.HasPrefix(r.Sym.Name, "go:itab") {
// itab._type might be put in an interface. // Same, but if we're using an itab we need to record that the
reflectdata.MarkTypeSymUsedInInterface(r.Sym, f.fe.Func().Linksym()) // itab._type might be put in an interface.
reflectdata.MarkTypeSymUsedInInterface(r.Sym, f.fe.Func().Linksym())
}
v.reset(OpAddr)
v.Aux = symToAux(r.Sym)
v.AddArg(sb)
return v
} }
return r.Sym
} }
base.Fatalf("fixedLoad data not known for %s:%d", sym, off)
} }
base.Fatalf("fixedSym data not known for %s:%d", sym, off)
if strings.HasPrefix(lsym.Name, "type:") {
// Type symbols do not contain information about their fields, unlike the cases above.
// Hand-implement field accesses.
// TODO: can this be replaced with reflectdata.writeType and just use the code above?
t := (*lsym.Extra).(*obj.TypeInfo).Type.(*types.Type)
ptrSizedOpConst := OpConst64
if f.Config.PtrSize == 4 {
ptrSizedOpConst = OpConst32
}
for _, f := range rttype.Type.Fields() {
if f.Offset == off && copyCompatibleType(v.Type, f.Type) {
switch f.Sym.Name {
case "Size_":
v.reset(ptrSizedOpConst)
v.AuxInt = int64(t.Size())
return v
case "PtrBytes":
v.reset(ptrSizedOpConst)
v.AuxInt = int64(types.PtrDataSize(t))
return v
case "Hash":
v.reset(OpConst32)
v.AuxInt = int64(types.TypeHash(t))
return v
case "Kind_":
v.reset(OpConst8)
v.AuxInt = int64(reflectdata.ABIKindOfType(t))
return v
default:
base.Fatalf("unknown field %s for fixedLoad of %s at offset %d", f.Sym.Name, lsym.Name, off)
}
}
}
if t.IsPtr() && off == rttype.PtrType.OffsetOf("Elem") {
elemSym := reflectdata.TypeLinksym(t.Elem())
reflectdata.MarkTypeSymUsedInInterface(elemSym, f.fe.Func().Linksym())
v.reset(OpAddr)
v.Aux = symToAux(elemSym)
v.AddArg(sb)
return v
}
base.Fatalf("fixedLoad data not known for %s:%d", sym, off)
}
base.Fatalf("fixedLoad data not known for %s:%d", sym, off)
return nil return nil
} }
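Editor's note: the practical effect of the isFixedLoad/rewriteFixedLoad pair is that a load of a known abi.Type field (Size_, PtrBytes, Hash, Kind_) or of a pointer type's Elem through a type: symbol is replaced by a constant or a symbol address at compile time. An illustrative probe, with the caveat that whether the fold actually fires here depends on inlining and on earlier passes:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Size() ultimately reads abi.Type.Size_; when the type descriptor is a
	// statically known type: symbol, the rewrite above can fold that load
	// to the constant 8 instead of reading the descriptor at run time.
	fmt.Println(reflect.TypeOf(int64(0)).Size()) // 8
}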


@ -296,9 +296,11 @@ func rewriteValueLOONG64(v *Value) bool {
v.Op = OpLOONG64LoweredGetClosurePtr v.Op = OpLOONG64LoweredGetClosurePtr
return true return true
case OpHmul32: case OpHmul32:
return rewriteValueLOONG64_OpHmul32(v) v.Op = OpLOONG64MULH
return true
case OpHmul32u: case OpHmul32u:
return rewriteValueLOONG64_OpHmul32u(v) v.Op = OpLOONG64MULHU
return true
case OpHmul64: case OpHmul64:
v.Op = OpLOONG64MULHV v.Op = OpLOONG64MULHV
return true return true
@ -322,6 +324,8 @@ func rewriteValueLOONG64(v *Value) bool {
return rewriteValueLOONG64_OpLOONG64ADDV(v) return rewriteValueLOONG64_OpLOONG64ADDV(v)
case OpLOONG64ADDVconst: case OpLOONG64ADDVconst:
return rewriteValueLOONG64_OpLOONG64ADDVconst(v) return rewriteValueLOONG64_OpLOONG64ADDVconst(v)
case OpLOONG64ADDshiftLLV:
return rewriteValueLOONG64_OpLOONG64ADDshiftLLV(v)
case OpLOONG64AND: case OpLOONG64AND:
return rewriteValueLOONG64_OpLOONG64AND(v) return rewriteValueLOONG64_OpLOONG64AND(v)
case OpLOONG64ANDconst: case OpLOONG64ANDconst:
@ -1576,50 +1580,6 @@ func rewriteValueLOONG64_OpEqPtr(v *Value) bool {
return true return true
} }
} }
func rewriteValueLOONG64_OpHmul32(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
// match: (Hmul32 x y)
// result: (SRAVconst (MULV (SignExt32to64 x) (SignExt32to64 y)) [32])
for {
x := v_0
y := v_1
v.reset(OpLOONG64SRAVconst)
v.AuxInt = int64ToAuxInt(32)
v0 := b.NewValue0(v.Pos, OpLOONG64MULV, typ.Int64)
v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v1.AddArg(x)
v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v2.AddArg(y)
v0.AddArg2(v1, v2)
v.AddArg(v0)
return true
}
}
func rewriteValueLOONG64_OpHmul32u(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
// match: (Hmul32u x y)
// result: (SRLVconst (MULV (ZeroExt32to64 x) (ZeroExt32to64 y)) [32])
for {
x := v_0
y := v_1
v.reset(OpLOONG64SRLVconst)
v.AuxInt = int64ToAuxInt(32)
v0 := b.NewValue0(v.Pos, OpLOONG64MULV, typ.Int64)
v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v1.AddArg(x)
v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v2.AddArg(y)
v0.AddArg2(v1, v2)
v.AddArg(v0)
return true
}
}
func rewriteValueLOONG64_OpIsInBounds(v *Value) bool { func rewriteValueLOONG64_OpIsInBounds(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
@ -2050,6 +2010,43 @@ func rewriteValueLOONG64_OpLOONG64ADDVconst(v *Value) bool {
v.AddArg(x) v.AddArg(x)
return true return true
} }
// match: (ADDVconst [c] x)
// cond: is32Bit(c) && c&0xffff == 0 && c != 0
// result: (ADDV16const [c] x)
for {
c := auxIntToInt64(v.AuxInt)
x := v_0
if !(is32Bit(c) && c&0xffff == 0 && c != 0) {
break
}
v.reset(OpLOONG64ADDV16const)
v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
return false
}
func rewriteValueLOONG64_OpLOONG64ADDshiftLLV(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ADDshiftLLV x (MOVVconst [c]) [d])
// cond: is12Bit(c<<d)
// result: (ADDVconst x [c<<d])
for {
d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpLOONG64MOVVconst {
break
}
c := auxIntToInt64(v_1.AuxInt)
if !(is12Bit(c << d)) {
break
}
v.reset(OpLOONG64ADDVconst)
v.AuxInt = int64ToAuxInt(c << d)
v.AddArg(x)
return true
}
return false return false
} }
func rewriteValueLOONG64_OpLOONG64AND(v *Value) bool { func rewriteValueLOONG64_OpLOONG64AND(v *Value) bool {
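Editor's note: the new ADDshiftLLV rule folds a constant shifted operand into the addition: x + (c << d) becomes a single add-immediate whenever the shifted constant still fits in a signed 12-bit immediate. A quick check of the arithmetic the rule relies on:

package main

import "fmt"

func main() {
	const c, d = 3, 4 // c<<d = 48, inside the 12-bit immediate range the rule requires
	x := int64(100)
	fmt.Println(x+(c<<d) == x+48) // true: the shape the rewrite replaces with ADDVconst [48]
}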
@ -2371,6 +2368,23 @@ func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool {
v_0 := v.Args[0] v_0 := v.Args[0]
b := v.Block b := v.Block
config := b.Func.Config config := b.Func.Config
// match: (MOVBUload [off] {sym} ptr (MOVBstore [off] {sym} ptr x _))
// result: (MOVBUreg x)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpLOONG64MOVBstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
x := v_1.Args[1]
if ptr != v_1.Args[0] {
break
}
v.reset(OpLOONG64MOVBUreg)
v.AddArg(x)
return true
}
// match: (MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) // match: (MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVBUload [off1+int32(off2)] {sym} ptr mem) // result: (MOVBUload [off1+int32(off2)] {sym} ptr mem)
@ -2433,6 +2447,19 @@ func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool {
v.AddArg3(ptr, idx, mem) v.AddArg3(ptr, idx, mem)
return true return true
} }
// match: (MOVBUload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVVconst [int64(read8(sym, int64(off)))])
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpLOONG64MOVVconst)
v.AuxInt = int64ToAuxInt(int64(read8(sym, int64(off))))
return true
}
return false return false
} }
func rewriteValueLOONG64_OpLOONG64MOVBUloadidx(v *Value) bool { func rewriteValueLOONG64_OpLOONG64MOVBUloadidx(v *Value) bool {
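Editor's note: the (SB) rules added throughout this file fold loads from read-only symbols into constants via read8/read16/read32/read64. Illustrative only, since whether a given access survives to this rule depends on earlier passes:

package main

import "fmt"

var s = "hello" // the backing bytes live in a read-only data symbol

func main() {
	// A byte load at a constant offset from a read-only symbol is the shape
	// the MOVBUload-from-SB rule above can replace with a MOVVconst.
	fmt.Println(s[1]) // 101 ('e')
}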
@ -2648,6 +2675,23 @@ func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool {
v_0 := v.Args[0] v_0 := v.Args[0]
b := v.Block b := v.Block
config := b.Func.Config config := b.Func.Config
// match: (MOVBload [off] {sym} ptr (MOVBstore [off] {sym} ptr x _))
// result: (MOVBreg x)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpLOONG64MOVBstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
x := v_1.Args[1]
if ptr != v_1.Args[0] {
break
}
v.reset(OpLOONG64MOVBreg)
v.AddArg(x)
return true
}
// match: (MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) // match: (MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVBload [off1+int32(off2)] {sym} ptr mem) // result: (MOVBload [off1+int32(off2)] {sym} ptr mem)
@ -2710,6 +2754,19 @@ func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool {
v.AddArg3(ptr, idx, mem) v.AddArg3(ptr, idx, mem)
return true return true
} }
// match: (MOVBload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVVconst [int64(int8(read8(sym, int64(off))))])
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpLOONG64MOVVconst)
v.AuxInt = int64ToAuxInt(int64(int8(read8(sym, int64(off)))))
return true
}
return false return false
} }
func rewriteValueLOONG64_OpLOONG64MOVBloadidx(v *Value) bool { func rewriteValueLOONG64_OpLOONG64MOVBloadidx(v *Value) bool {
@ -3568,6 +3625,23 @@ func rewriteValueLOONG64_OpLOONG64MOVHUload(v *Value) bool {
v_0 := v.Args[0] v_0 := v.Args[0]
b := v.Block b := v.Block
config := b.Func.Config config := b.Func.Config
// match: (MOVHUload [off] {sym} ptr (MOVHstore [off] {sym} ptr x _))
// result: (MOVHUreg x)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpLOONG64MOVHstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
x := v_1.Args[1]
if ptr != v_1.Args[0] {
break
}
v.reset(OpLOONG64MOVHUreg)
v.AddArg(x)
return true
}
// match: (MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) // match: (MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVHUload [off1+int32(off2)] {sym} ptr mem) // result: (MOVHUload [off1+int32(off2)] {sym} ptr mem)
@ -3630,6 +3704,19 @@ func rewriteValueLOONG64_OpLOONG64MOVHUload(v *Value) bool {
v.AddArg3(ptr, idx, mem) v.AddArg3(ptr, idx, mem)
return true return true
} }
// match: (MOVHUload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVVconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpLOONG64MOVVconst)
v.AuxInt = int64ToAuxInt(int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
return true
}
return false return false
} }
func rewriteValueLOONG64_OpLOONG64MOVHUloadidx(v *Value) bool { func rewriteValueLOONG64_OpLOONG64MOVHUloadidx(v *Value) bool {
@ -3807,6 +3894,23 @@ func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool {
v_0 := v.Args[0] v_0 := v.Args[0]
b := v.Block b := v.Block
config := b.Func.Config config := b.Func.Config
// match: (MOVHload [off] {sym} ptr (MOVHstore [off] {sym} ptr x _))
// result: (MOVHreg x)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpLOONG64MOVHstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
x := v_1.Args[1]
if ptr != v_1.Args[0] {
break
}
v.reset(OpLOONG64MOVHreg)
v.AddArg(x)
return true
}
// match: (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) // match: (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVHload [off1+int32(off2)] {sym} ptr mem) // result: (MOVHload [off1+int32(off2)] {sym} ptr mem)
@ -3869,6 +3973,19 @@ func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool {
v.AddArg3(ptr, idx, mem) v.AddArg3(ptr, idx, mem)
return true return true
} }
// match: (MOVHload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVVconst [int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpLOONG64MOVVconst)
v.AuxInt = int64ToAuxInt(int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))))
return true
}
return false return false
} }
func rewriteValueLOONG64_OpLOONG64MOVHloadidx(v *Value) bool { func rewriteValueLOONG64_OpLOONG64MOVHloadidx(v *Value) bool {
@ -4250,6 +4367,23 @@ func rewriteValueLOONG64_OpLOONG64MOVVload(v *Value) bool {
v.AddArg(val) v.AddArg(val)
return true return true
} }
// match: (MOVVload [off] {sym} ptr (MOVVstore [off] {sym} ptr x _))
// result: (MOVVreg x)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpLOONG64MOVVstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
x := v_1.Args[1]
if ptr != v_1.Args[0] {
break
}
v.reset(OpLOONG64MOVVreg)
v.AddArg(x)
return true
}
// match: (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) // match: (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVVload [off1+int32(off2)] {sym} ptr mem) // result: (MOVVload [off1+int32(off2)] {sym} ptr mem)
@ -4312,6 +4446,19 @@ func rewriteValueLOONG64_OpLOONG64MOVVload(v *Value) bool {
v.AddArg3(ptr, idx, mem) v.AddArg3(ptr, idx, mem)
return true return true
} }
// match: (MOVVload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVVconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpLOONG64MOVVconst)
v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
return true
}
return false return false
} }
func rewriteValueLOONG64_OpLOONG64MOVVloadidx(v *Value) bool { func rewriteValueLOONG64_OpLOONG64MOVVloadidx(v *Value) bool {
@ -4558,6 +4705,23 @@ func rewriteValueLOONG64_OpLOONG64MOVWUload(v *Value) bool {
v.AddArg(v0) v.AddArg(v0)
return true return true
} }
// match: (MOVWUload [off] {sym} ptr (MOVWstore [off] {sym} ptr x _))
// result: (MOVWUreg x)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpLOONG64MOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
x := v_1.Args[1]
if ptr != v_1.Args[0] {
break
}
v.reset(OpLOONG64MOVWUreg)
v.AddArg(x)
return true
}
// match: (MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) // match: (MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVWUload [off1+int32(off2)] {sym} ptr mem) // result: (MOVWUload [off1+int32(off2)] {sym} ptr mem)
@ -4620,6 +4784,19 @@ func rewriteValueLOONG64_OpLOONG64MOVWUload(v *Value) bool {
v.AddArg3(ptr, idx, mem) v.AddArg3(ptr, idx, mem)
return true return true
} }
// match: (MOVWUload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVVconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpLOONG64MOVVconst)
v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
return true
}
return false return false
} }
func rewriteValueLOONG64_OpLOONG64MOVWUloadidx(v *Value) bool { func rewriteValueLOONG64_OpLOONG64MOVWUloadidx(v *Value) bool {
@ -4830,6 +5007,23 @@ func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool {
v_0 := v.Args[0] v_0 := v.Args[0]
b := v.Block b := v.Block
config := b.Func.Config config := b.Func.Config
// match: (MOVWload [off] {sym} ptr (MOVWstore [off] {sym} ptr x _))
// result: (MOVWreg x)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpLOONG64MOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
x := v_1.Args[1]
if ptr != v_1.Args[0] {
break
}
v.reset(OpLOONG64MOVWreg)
v.AddArg(x)
return true
}
// match: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) // match: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVWload [off1+int32(off2)] {sym} ptr mem) // result: (MOVWload [off1+int32(off2)] {sym} ptr mem)
@ -4892,6 +5086,19 @@ func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool {
v.AddArg3(ptr, idx, mem) v.AddArg3(ptr, idx, mem)
return true return true
} }
// match: (MOVWload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVVconst [int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpLOONG64MOVVconst)
v.AuxInt = int64ToAuxInt(int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))))
return true
}
return false return false
} }
func rewriteValueLOONG64_OpLOONG64MOVWloadidx(v *Value) bool { func rewriteValueLOONG64_OpLOONG64MOVWloadidx(v *Value) bool {
@ -9056,62 +9263,35 @@ func rewriteValueLOONG64_OpMove(v *Value) bool {
return true return true
} }
// match: (Move [s] dst src mem) // match: (Move [s] dst src mem)
// cond: s%8 != 0 && s > 16 // cond: s > 16 && s < 192 && logLargeCopy(v, s)
// result: (Move [s%8] (OffPtr <dst.Type> dst [s-s%8]) (OffPtr <src.Type> src [s-s%8]) (Move [s-s%8] dst src mem)) // result: (LoweredMove [s] dst src mem)
for { for {
s := auxIntToInt64(v.AuxInt) s := auxIntToInt64(v.AuxInt)
dst := v_0 dst := v_0
src := v_1 src := v_1
mem := v_2 mem := v_2
if !(s%8 != 0 && s > 16) { if !(s > 16 && s < 192 && logLargeCopy(v, s)) {
break break
} }
v.reset(OpMove) v.reset(OpLOONG64LoweredMove)
v.AuxInt = int64ToAuxInt(s % 8) v.AuxInt = int64ToAuxInt(s)
v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
v0.AuxInt = int64ToAuxInt(s - s%8)
v0.AddArg(dst)
v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
v1.AuxInt = int64ToAuxInt(s - s%8)
v1.AddArg(src)
v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
v2.AuxInt = int64ToAuxInt(s - s%8)
v2.AddArg3(dst, src, mem)
v.AddArg3(v0, v1, v2)
return true
}
// match: (Move [s] dst src mem)
// cond: s%8 == 0 && s > 16 && s <= 8*128 && logLargeCopy(v, s)
// result: (DUFFCOPY [16 * (128 - s/8)] dst src mem)
for {
s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
if !(s%8 == 0 && s > 16 && s <= 8*128 && logLargeCopy(v, s)) {
break
}
v.reset(OpLOONG64DUFFCOPY)
v.AuxInt = int64ToAuxInt(16 * (128 - s/8))
v.AddArg3(dst, src, mem) v.AddArg3(dst, src, mem)
return true return true
} }
// match: (Move [s] dst src mem) // match: (Move [s] dst src mem)
// cond: s%8 == 0 && s > 1024 && logLargeCopy(v, s) // cond: s >= 192 && logLargeCopy(v, s)
// result: (LoweredMove dst src (ADDVconst <src.Type> src [s-8]) mem) // result: (LoweredMoveLoop [s] dst src mem)
for { for {
s := auxIntToInt64(v.AuxInt) s := auxIntToInt64(v.AuxInt)
dst := v_0 dst := v_0
src := v_1 src := v_1
mem := v_2 mem := v_2
if !(s%8 == 0 && s > 1024 && logLargeCopy(v, s)) { if !(s >= 192 && logLargeCopy(v, s)) {
break break
} }
v.reset(OpLOONG64LoweredMove) v.reset(OpLOONG64LoweredMoveLoop)
v0 := b.NewValue0(v.Pos, OpLOONG64ADDVconst, src.Type) v.AuxInt = int64ToAuxInt(s)
v0.AuxInt = int64ToAuxInt(s - 8) v.AddArg3(dst, src, mem)
v0.AddArg(src)
v.AddArg4(dst, src, v0, mem)
return true return true
} }
return false return false
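Editor's note: the Move lowering for loong64 now picks between two ops by size alone: copies over 16 bytes and under 192 become a single unrolled LoweredMove, and 192 bytes and up become LoweredMoveLoop; smaller copies are handled by the unchanged rules earlier in this function. A sketch of the decision, with the size classes taken from the conditions above:

package main

import "fmt"

func chooseMove(s int64) string {
	switch {
	case s <= 16:
		return "inline loads/stores" // earlier, unchanged rules
	case s < 192:
		return "LoweredMove" // single unrolled copy
	default:
		return "LoweredMoveLoop" // loop over fixed-size blocks
	}
}

func main() {
	fmt.Println(chooseMove(8), chooseMove(64), chooseMove(4096))
}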


@ -332,8 +332,6 @@ func rewriteValueMIPS64(v *Value) bool {
return rewriteValueMIPS64_OpMIPS64MOVBreg(v) return rewriteValueMIPS64_OpMIPS64MOVBreg(v)
case OpMIPS64MOVBstore: case OpMIPS64MOVBstore:
return rewriteValueMIPS64_OpMIPS64MOVBstore(v) return rewriteValueMIPS64_OpMIPS64MOVBstore(v)
case OpMIPS64MOVBstorezero:
return rewriteValueMIPS64_OpMIPS64MOVBstorezero(v)
case OpMIPS64MOVDload: case OpMIPS64MOVDload:
return rewriteValueMIPS64_OpMIPS64MOVDload(v) return rewriteValueMIPS64_OpMIPS64MOVDload(v)
case OpMIPS64MOVDstore: case OpMIPS64MOVDstore:
@ -352,8 +350,6 @@ func rewriteValueMIPS64(v *Value) bool {
return rewriteValueMIPS64_OpMIPS64MOVHreg(v) return rewriteValueMIPS64_OpMIPS64MOVHreg(v)
case OpMIPS64MOVHstore: case OpMIPS64MOVHstore:
return rewriteValueMIPS64_OpMIPS64MOVHstore(v) return rewriteValueMIPS64_OpMIPS64MOVHstore(v)
case OpMIPS64MOVHstorezero:
return rewriteValueMIPS64_OpMIPS64MOVHstorezero(v)
case OpMIPS64MOVVload: case OpMIPS64MOVVload:
return rewriteValueMIPS64_OpMIPS64MOVVload(v) return rewriteValueMIPS64_OpMIPS64MOVVload(v)
case OpMIPS64MOVVnop: case OpMIPS64MOVVnop:
@ -362,8 +358,6 @@ func rewriteValueMIPS64(v *Value) bool {
return rewriteValueMIPS64_OpMIPS64MOVVreg(v) return rewriteValueMIPS64_OpMIPS64MOVVreg(v)
case OpMIPS64MOVVstore: case OpMIPS64MOVVstore:
return rewriteValueMIPS64_OpMIPS64MOVVstore(v) return rewriteValueMIPS64_OpMIPS64MOVVstore(v)
case OpMIPS64MOVVstorezero:
return rewriteValueMIPS64_OpMIPS64MOVVstorezero(v)
case OpMIPS64MOVWUload: case OpMIPS64MOVWUload:
return rewriteValueMIPS64_OpMIPS64MOVWUload(v) return rewriteValueMIPS64_OpMIPS64MOVWUload(v)
case OpMIPS64MOVWUreg: case OpMIPS64MOVWUreg:
@ -374,8 +368,6 @@ func rewriteValueMIPS64(v *Value) bool {
return rewriteValueMIPS64_OpMIPS64MOVWreg(v) return rewriteValueMIPS64_OpMIPS64MOVWreg(v)
case OpMIPS64MOVWstore: case OpMIPS64MOVWstore:
return rewriteValueMIPS64_OpMIPS64MOVWstore(v) return rewriteValueMIPS64_OpMIPS64MOVWstore(v)
case OpMIPS64MOVWstorezero:
return rewriteValueMIPS64_OpMIPS64MOVWstorezero(v)
case OpMIPS64NEGV: case OpMIPS64NEGV:
return rewriteValueMIPS64_OpMIPS64NEGV(v) return rewriteValueMIPS64_OpMIPS64NEGV(v)
case OpMIPS64NOR: case OpMIPS64NOR:
@ -3095,22 +3087,6 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool {
v.AddArg3(ptr, val, mem) v.AddArg3(ptr, val, mem)
return true return true
} }
// match: (MOVBstore [off] {sym} ptr (MOVVconst [0]) mem)
// result: (MOVBstorezero [off] {sym} ptr mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
break
}
mem := v_2
v.reset(OpMIPS64MOVBstorezero)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
// result: (MOVBstore [off] {sym} ptr x mem) // result: (MOVBstore [off] {sym} ptr x mem)
for { for {
@ -3215,56 +3191,6 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool {
} }
return false return false
} }
func rewriteValueMIPS64_OpMIPS64MOVBstorezero(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
// match: (MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpMIPS64ADDVconst {
break
}
off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(OpMIPS64MOVBstorezero)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
if v_0.Op != OpMIPS64MOVVaddr {
break
}
off2 := auxIntToInt32(v_0.AuxInt)
sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(OpMIPS64MOVBstorezero)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
return false
}
func rewriteValueMIPS64_OpMIPS64MOVDload(v *Value) bool { func rewriteValueMIPS64_OpMIPS64MOVDload(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
@ -3856,22 +3782,6 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool {
v.AddArg3(ptr, val, mem) v.AddArg3(ptr, val, mem)
return true return true
} }
// match: (MOVHstore [off] {sym} ptr (MOVVconst [0]) mem)
// result: (MOVHstorezero [off] {sym} ptr mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
break
}
mem := v_2
v.reset(OpMIPS64MOVHstorezero)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
// result: (MOVHstore [off] {sym} ptr x mem) // result: (MOVHstore [off] {sym} ptr x mem)
for { for {
@ -3942,56 +3852,6 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool {
} }
return false return false
} }
func rewriteValueMIPS64_OpMIPS64MOVHstorezero(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
// match: (MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpMIPS64ADDVconst {
break
}
off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(OpMIPS64MOVHstorezero)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
if v_0.Op != OpMIPS64MOVVaddr {
break
}
off2 := auxIntToInt32(v_0.AuxInt)
sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(OpMIPS64MOVHstorezero)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
return false
}
func rewriteValueMIPS64_OpMIPS64MOVVload(v *Value) bool { func rewriteValueMIPS64_OpMIPS64MOVVload(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
@ -4182,72 +4042,6 @@ func rewriteValueMIPS64_OpMIPS64MOVVstore(v *Value) bool {
v.AddArg3(ptr, val, mem) v.AddArg3(ptr, val, mem)
return true return true
} }
// match: (MOVVstore [off] {sym} ptr (MOVVconst [0]) mem)
// result: (MOVVstorezero [off] {sym} ptr mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
break
}
mem := v_2
v.reset(OpMIPS64MOVVstorezero)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
return false
}
func rewriteValueMIPS64_OpMIPS64MOVVstorezero(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
// match: (MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVVstorezero [off1+int32(off2)] {sym} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpMIPS64ADDVconst {
break
}
off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(OpMIPS64MOVVstorezero)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
if v_0.Op != OpMIPS64MOVVaddr {
break
}
off2 := auxIntToInt32(v_0.AuxInt)
sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(OpMIPS64MOVVstorezero)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
return false return false
} }
func rewriteValueMIPS64_OpMIPS64MOVWUload(v *Value) bool { func rewriteValueMIPS64_OpMIPS64MOVWUload(v *Value) bool {
@ -4659,22 +4453,6 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool {
v.AddArg3(ptr, val, mem) v.AddArg3(ptr, val, mem)
return true return true
} }
// match: (MOVWstore [off] {sym} ptr (MOVVconst [0]) mem)
// result: (MOVWstorezero [off] {sym} ptr mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
break
}
mem := v_2
v.reset(OpMIPS64MOVWstorezero)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
// result: (MOVWstore [off] {sym} ptr x mem) // result: (MOVWstore [off] {sym} ptr x mem)
for { for {
@ -4711,56 +4489,6 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool {
} }
return false return false
} }
func rewriteValueMIPS64_OpMIPS64MOVWstorezero(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
// match: (MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpMIPS64ADDVconst {
break
}
off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(OpMIPS64MOVWstorezero)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
if v_0.Op != OpMIPS64MOVVaddr {
break
}
off2 := auxIntToInt32(v_0.AuxInt)
sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(OpMIPS64MOVWstorezero)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
return false
}
func rewriteValueMIPS64_OpMIPS64NEGV(v *Value) bool { func rewriteValueMIPS64_OpMIPS64NEGV(v *Value) bool {
v_0 := v.Args[0] v_0 := v.Args[0]
// match: (NEGV (SUBV x y)) // match: (NEGV (SUBV x y))


@ -0,0 +1,26 @@
// Code generated from _gen/MIPS64latelower.rules using 'go generate'; DO NOT EDIT.
package ssa
func rewriteValueMIPS64latelower(v *Value) bool {
switch v.Op {
case OpMIPS64MOVVconst:
return rewriteValueMIPS64latelower_OpMIPS64MOVVconst(v)
}
return false
}
func rewriteValueMIPS64latelower_OpMIPS64MOVVconst(v *Value) bool {
// match: (MOVVconst [0])
// result: (ZERO)
for {
if auxIntToInt64(v.AuxInt) != 0 {
break
}
v.reset(OpMIPS64ZERO)
return true
}
return false
}
func rewriteBlockMIPS64latelower(b *Block) bool {
return false
}
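Editor's note: this new late-lower pass canonicalizes the zero constant into the ZERO pseudo-op after the main rewrites have run, so the register allocator can pin it to the hardwired zero register. The same rewrite shape on a toy value type, names hypothetical:

package main

import "fmt"

type value struct {
	op     string
	auxInt int64
}

// lowerZero mirrors rewriteValueMIPS64latelower_OpMIPS64MOVVconst: only the
// exact constant zero is rewritten; other constants are left alone.
func lowerZero(v *value) bool {
	if v.op == "MOVVconst" && v.auxInt == 0 {
		v.op = "ZERO"
		return true
	}
	return false
}

func main() {
	v := value{op: "MOVVconst", auxInt: 0}
	fmt.Println(lowerZero(&v), v.op) // true ZERO
}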


@ -3090,169 +3090,38 @@ func rewriteValueRISCV64_OpMove(v *Value) bool {
v.AddArg3(dst, v0, v1) v.AddArg3(dst, v0, v1)
return true return true
} }
// match: (Move [12] {t} dst src mem)
// cond: t.Alignment()%4 == 0
// result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)))
for {
if auxIntToInt64(v.AuxInt) != 12 {
break
}
t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
if !(t.Alignment()%4 == 0) {
break
}
v.reset(OpRISCV64MOVWstore)
v.AuxInt = int32ToAuxInt(8)
v0 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
v0.AuxInt = int32ToAuxInt(8)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
v1.AuxInt = int32ToAuxInt(4)
v2 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
v2.AuxInt = int32ToAuxInt(4)
v2.AddArg2(src, mem)
v3 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
v4 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
v4.AddArg2(src, mem)
v3.AddArg3(dst, v4, mem)
v1.AddArg3(dst, v2, v3)
v.AddArg3(dst, v0, v1)
return true
}
// match: (Move [16] {t} dst src mem)
// cond: t.Alignment()%8 == 0
// result: (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))
for {
if auxIntToInt64(v.AuxInt) != 16 {
break
}
t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
if !(t.Alignment()%8 == 0) {
break
}
v.reset(OpRISCV64MOVDstore)
v.AuxInt = int32ToAuxInt(8)
v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
v0.AuxInt = int32ToAuxInt(8)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
v2 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
v2.AddArg2(src, mem)
v1.AddArg3(dst, v2, mem)
v.AddArg3(dst, v0, v1)
return true
}
// match: (Move [24] {t} dst src mem)
// cond: t.Alignment()%8 == 0
// result: (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)))
for {
if auxIntToInt64(v.AuxInt) != 24 {
break
}
t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
if !(t.Alignment()%8 == 0) {
break
}
v.reset(OpRISCV64MOVDstore)
v.AuxInt = int32ToAuxInt(16)
v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
v0.AuxInt = int32ToAuxInt(16)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
v1.AuxInt = int32ToAuxInt(8)
v2 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
v2.AuxInt = int32ToAuxInt(8)
v2.AddArg2(src, mem)
v3 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
v4 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
v4.AddArg2(src, mem)
v3.AddArg3(dst, v4, mem)
v1.AddArg3(dst, v2, v3)
v.AddArg3(dst, v0, v1)
return true
}
// match: (Move [32] {t} dst src mem)
// cond: t.Alignment()%8 == 0
// result: (MOVDstore [24] dst (MOVDload [24] src mem) (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))))
for {
if auxIntToInt64(v.AuxInt) != 32 {
break
}
t := auxToType(v.Aux)
dst := v_0
src := v_1
mem := v_2
if !(t.Alignment()%8 == 0) {
break
}
v.reset(OpRISCV64MOVDstore)
v.AuxInt = int32ToAuxInt(24)
v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
v0.AuxInt = int32ToAuxInt(24)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
v1.AuxInt = int32ToAuxInt(16)
v2 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
v2.AuxInt = int32ToAuxInt(16)
v2.AddArg2(src, mem)
v3 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
v3.AuxInt = int32ToAuxInt(8)
v4 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
v4.AuxInt = int32ToAuxInt(8)
v4.AddArg2(src, mem)
v5 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
v6 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
v6.AddArg2(src, mem)
v5.AddArg3(dst, v6, mem)
v3.AddArg3(dst, v4, v5)
v1.AddArg3(dst, v2, v3)
v.AddArg3(dst, v0, v1)
return true
}
	// match: (Move [s] {t} dst src mem)
-	// cond: s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && logLargeCopy(v, s)
-	// result: (DUFFCOPY [16 * (128 - s/8)] dst src mem)
+	// cond: s > 0 && s <= 3*8*moveSize(t.Alignment(), config) && logLargeCopy(v, s)
+	// result: (LoweredMove [makeValAndOff(int32(s),int32(t.Alignment()))] dst src mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		t := auxToType(v.Aux)
		dst := v_0
		src := v_1
		mem := v_2
-		if !(s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && logLargeCopy(v, s)) {
+		if !(s > 0 && s <= 3*8*moveSize(t.Alignment(), config) && logLargeCopy(v, s)) {
			break
		}
-		v.reset(OpRISCV64DUFFCOPY)
-		v.AuxInt = int64ToAuxInt(16 * (128 - s/8))
+		v.reset(OpRISCV64LoweredMove)
+		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s), int32(t.Alignment())))
		v.AddArg3(dst, src, mem)
		return true
	}
	// match: (Move [s] {t} dst src mem)
-	// cond: (s <= 16 || logLargeCopy(v, s))
-	// result: (LoweredMove [t.Alignment()] dst src (ADDI <src.Type> [s-moveSize(t.Alignment(), config)] src) mem)
+	// cond: s > 3*8*moveSize(t.Alignment(), config) && logLargeCopy(v, s)
+	// result: (LoweredMoveLoop [makeValAndOff(int32(s),int32(t.Alignment()))] dst src mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		t := auxToType(v.Aux)
		dst := v_0
		src := v_1
		mem := v_2
-		if !(s <= 16 || logLargeCopy(v, s)) {
+		if !(s > 3*8*moveSize(t.Alignment(), config) && logLargeCopy(v, s)) {
			break
		}
-		v.reset(OpRISCV64LoweredMove)
-		v.AuxInt = int64ToAuxInt(t.Alignment())
-		v0 := b.NewValue0(v.Pos, OpRISCV64ADDI, src.Type)
-		v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config))
-		v0.AddArg(src)
-		v.AddArg4(dst, src, v0, mem)
+		v.reset(OpRISCV64LoweredMoveLoop)
+		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s), int32(t.Alignment())))
+		v.AddArg3(dst, src, mem)
		return true
	}
	return false
@@ -4740,6 +4609,25 @@ func rewriteValueRISCV64_OpRISCV64MOVBUload(v *Value) bool {
		v.AddArg2(base, mem)
		return true
	}
// match: (MOVBUload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _))
// cond: isSamePtr(ptr1, ptr2)
// result: (MOVBUreg x)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr1 := v_0
if v_1.Op != OpRISCV64MOVBstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
x := v_1.Args[1]
ptr2 := v_1.Args[0]
if !(isSamePtr(ptr1, ptr2)) {
break
}
v.reset(OpRISCV64MOVBUreg)
v.AddArg(x)
return true
}
	return false
}
func rewriteValueRISCV64_OpRISCV64MOVBUreg(v *Value) bool {
@@ -5049,6 +4937,25 @@ func rewriteValueRISCV64_OpRISCV64MOVBload(v *Value) bool {
		v.AddArg2(base, mem)
		return true
	}
// match: (MOVBload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _))
// cond: isSamePtr(ptr1, ptr2)
// result: (MOVBreg x)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr1 := v_0
if v_1.Op != OpRISCV64MOVBstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
x := v_1.Args[1]
ptr2 := v_1.Args[0]
if !(isSamePtr(ptr1, ptr2)) {
break
}
v.reset(OpRISCV64MOVBreg)
v.AddArg(x)
return true
}
	return false
}
func rewriteValueRISCV64_OpRISCV64MOVBreg(v *Value) bool {
@@ -5397,6 +5304,25 @@ func rewriteValueRISCV64_OpRISCV64MOVDload(v *Value) bool {
		v.AddArg2(base, mem)
		return true
	}
// match: (MOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _))
// cond: isSamePtr(ptr1, ptr2)
// result: (MOVDreg x)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr1 := v_0
if v_1.Op != OpRISCV64MOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
x := v_1.Args[1]
ptr2 := v_1.Args[0]
if !(isSamePtr(ptr1, ptr2)) {
break
}
v.reset(OpRISCV64MOVDreg)
v.AddArg(x)
return true
}
	// match: (MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _))
	// cond: isSamePtr(ptr1, ptr2)
	// result: (FMVXD x)
@@ -5616,6 +5542,25 @@ func rewriteValueRISCV64_OpRISCV64MOVHUload(v *Value) bool {
		v.AddArg2(base, mem)
		return true
	}
// match: (MOVHUload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _))
// cond: isSamePtr(ptr1, ptr2)
// result: (MOVHUreg x)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr1 := v_0
if v_1.Op != OpRISCV64MOVHstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
x := v_1.Args[1]
ptr2 := v_1.Args[0]
if !(isSamePtr(ptr1, ptr2)) {
break
}
v.reset(OpRISCV64MOVHUreg)
v.AddArg(x)
return true
}
	return false
}
func rewriteValueRISCV64_OpRISCV64MOVHUreg(v *Value) bool {
@@ -5782,6 +5727,25 @@ func rewriteValueRISCV64_OpRISCV64MOVHload(v *Value) bool {
		v.AddArg2(base, mem)
		return true
	}
// match: (MOVHload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _))
// cond: isSamePtr(ptr1, ptr2)
// result: (MOVHreg x)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr1 := v_0
if v_1.Op != OpRISCV64MOVHstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
x := v_1.Args[1]
ptr2 := v_1.Args[0]
if !(isSamePtr(ptr1, ptr2)) {
break
}
v.reset(OpRISCV64MOVHreg)
v.AddArg(x)
return true
}
	return false
}
func rewriteValueRISCV64_OpRISCV64MOVHreg(v *Value) bool {
@@ -6141,6 +6105,25 @@ func rewriteValueRISCV64_OpRISCV64MOVWUload(v *Value) bool {
		v.AddArg2(base, mem)
		return true
	}
// match: (MOVWUload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _))
// cond: isSamePtr(ptr1, ptr2)
// result: (MOVWUreg x)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr1 := v_0
if v_1.Op != OpRISCV64MOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
x := v_1.Args[1]
ptr2 := v_1.Args[0]
if !(isSamePtr(ptr1, ptr2)) {
break
}
v.reset(OpRISCV64MOVWUreg)
v.AddArg(x)
return true
}
	// match: (MOVWUload [off] {sym} ptr1 (FMOVWstore [off] {sym} ptr2 x _))
	// cond: isSamePtr(ptr1, ptr2)
	// result: (MOVWUreg (FMVXS x))
@@ -6352,6 +6335,25 @@ func rewriteValueRISCV64_OpRISCV64MOVWload(v *Value) bool {
		v.AddArg2(base, mem)
		return true
	}
// match: (MOVWload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _))
// cond: isSamePtr(ptr1, ptr2)
// result: (MOVWreg x)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr1 := v_0
if v_1.Op != OpRISCV64MOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
x := v_1.Args[1]
ptr2 := v_1.Args[0]
if !(isSamePtr(ptr1, ptr2)) {
break
}
v.reset(OpRISCV64MOVWreg)
v.AddArg(x)
return true
}
	// match: (MOVWload [off] {sym} ptr1 (FMOVWstore [off] {sym} ptr2 x _))
	// cond: isSamePtr(ptr1, ptr2)
	// result: (FMVXS x)
@@ -9792,138 +9794,39 @@ func rewriteValueRISCV64_OpZero(v *Value) bool {
		v.AddArg3(ptr, v0, v1)
		return true
	}
// match: (Zero [12] {t} ptr mem)
// cond: t.Alignment()%4 == 0
// result: (MOVWstore [8] ptr (MOVDconst [0]) (MOVWstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem)))
for {
if auxIntToInt64(v.AuxInt) != 12 {
break
}
t := auxToType(v.Aux)
ptr := v_0
mem := v_1
if !(t.Alignment()%4 == 0) {
break
}
v.reset(OpRISCV64MOVWstore)
v.AuxInt = int32ToAuxInt(8)
v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
v0.AuxInt = int64ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
v1.AuxInt = int32ToAuxInt(4)
v2 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
v2.AddArg3(ptr, v0, mem)
v1.AddArg3(ptr, v0, v2)
v.AddArg3(ptr, v0, v1)
return true
}
// match: (Zero [16] {t} ptr mem)
// cond: t.Alignment()%8 == 0
// result: (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))
for {
if auxIntToInt64(v.AuxInt) != 16 {
break
}
t := auxToType(v.Aux)
ptr := v_0
mem := v_1
if !(t.Alignment()%8 == 0) {
break
}
v.reset(OpRISCV64MOVDstore)
v.AuxInt = int32ToAuxInt(8)
v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
v0.AuxInt = int64ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
v1.AddArg3(ptr, v0, mem)
v.AddArg3(ptr, v0, v1)
return true
}
// match: (Zero [24] {t} ptr mem)
// cond: t.Alignment()%8 == 0
// result: (MOVDstore [16] ptr (MOVDconst [0]) (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)))
for {
if auxIntToInt64(v.AuxInt) != 24 {
break
}
t := auxToType(v.Aux)
ptr := v_0
mem := v_1
if !(t.Alignment()%8 == 0) {
break
}
v.reset(OpRISCV64MOVDstore)
v.AuxInt = int32ToAuxInt(16)
v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
v0.AuxInt = int64ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
v1.AuxInt = int32ToAuxInt(8)
v2 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
v2.AddArg3(ptr, v0, mem)
v1.AddArg3(ptr, v0, v2)
v.AddArg3(ptr, v0, v1)
return true
}
// match: (Zero [32] {t} ptr mem)
// cond: t.Alignment()%8 == 0
// result: (MOVDstore [24] ptr (MOVDconst [0]) (MOVDstore [16] ptr (MOVDconst [0]) (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))))
for {
if auxIntToInt64(v.AuxInt) != 32 {
break
}
t := auxToType(v.Aux)
ptr := v_0
mem := v_1
if !(t.Alignment()%8 == 0) {
break
}
v.reset(OpRISCV64MOVDstore)
v.AuxInt = int32ToAuxInt(24)
v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
v0.AuxInt = int64ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
v1.AuxInt = int32ToAuxInt(16)
v2 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
v2.AuxInt = int32ToAuxInt(8)
v3 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
v3.AddArg3(ptr, v0, mem)
v2.AddArg3(ptr, v0, v3)
v1.AddArg3(ptr, v0, v2)
v.AddArg3(ptr, v0, v1)
return true
}
	// match: (Zero [s] {t} ptr mem)
-	// cond: s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0
-	// result: (DUFFZERO [8 * (128 - s/8)] ptr mem)
+	// cond: s <= 24*moveSize(t.Alignment(), config)
+	// result: (LoweredZero [makeValAndOff(int32(s),int32(t.Alignment()))] ptr mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		t := auxToType(v.Aux)
		ptr := v_0
		mem := v_1
-		if !(s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0) {
+		if !(s <= 24*moveSize(t.Alignment(), config)) {
			break
		}
-		v.reset(OpRISCV64DUFFZERO)
-		v.AuxInt = int64ToAuxInt(8 * (128 - s/8))
+		v.reset(OpRISCV64LoweredZero)
+		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s), int32(t.Alignment())))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Zero [s] {t} ptr mem)
-	// result: (LoweredZero [t.Alignment()] ptr (ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.Alignment(), config)])) mem)
+	// cond: s > 24*moveSize(t.Alignment(), config)
+	// result: (LoweredZeroLoop [makeValAndOff(int32(s),int32(t.Alignment()))] ptr mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		t := auxToType(v.Aux)
		ptr := v_0
		mem := v_1
-		v.reset(OpRISCV64LoweredZero)
-		v.AuxInt = int64ToAuxInt(t.Alignment())
-		v0 := b.NewValue0(v.Pos, OpRISCV64ADD, ptr.Type)
-		v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
-		v1.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config))
-		v0.AddArg2(ptr, v1)
-		v.AddArg3(ptr, v0, mem)
+		if !(s > 24*moveSize(t.Alignment(), config)) {
+			break
+		}
+		v.reset(OpRISCV64LoweredZeroLoop)
+		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s), int32(t.Alignment())))
+		v.AddArg2(ptr, mem)
		return true
	}
+	return false
}
func rewriteBlockRISCV64(b *Block) bool {
	typ := &b.Func.Config.Types

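Both the new LoweredMove/LoweredZero rules and their loop variants carry the copy size and the alignment in a single auxiliary integer via makeValAndOff. A minimal standalone sketch of that packing, assuming the usual val-in-high-32-bits/off-in-low-32-bits ValAndOff layout (valAndOff below is a hypothetical stand-in for the ssa package helpers, not the real implementation):

	package main

	import "fmt"

	// valAndOff packs two int32s into one int64: val (here, the size) in the
	// high 32 bits and off (here, the alignment) in the low 32 bits.
	func valAndOff(val, off int32) int64 {
		return int64(val)<<32 | int64(uint32(off))
	}

	func main() {
		aux := valAndOff(40, 8)                  // a Move of 40 bytes, 8-byte aligned
		fmt.Println(aux>>32, int32(uint32(aux))) // 40 8
	}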

@@ -14089,8 +14089,6 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
-	config := b.Func.Config
-	typ := &b.Func.Config.Types
	// match: (Load <t1> p1 (Store {t2} p2 x _))
	// cond: isSamePtr(p1, p2) && copyCompatibleType(t1, x.Type) && t1.Size() == t2.Size()
	// result: x
@@ -14675,11 +14673,95 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
		v.AddArg(v0)
		return true
	}
-	// match: (Load <typ.BytePtr> (OffPtr [off] (Addr {s} sb) ) _)
-	// cond: isFixedSym(s, off)
-	// result: (Addr {fixedSym(b.Func, s, off)} sb)
+	// match: (Load (Addr {s} sb) _)
+	// cond: isFixedLoad(v, s, 0)
+	// result: rewriteFixedLoad(v, s, sb, 0)
	for {
-		if v.Type != typ.BytePtr || v_0.Op != OpOffPtr {
+		if v_0.Op != OpAddr {
break
}
s := auxToSym(v_0.Aux)
sb := v_0.Args[0]
if !(isFixedLoad(v, s, 0)) {
break
}
v.copyOf(rewriteFixedLoad(v, s, sb, 0))
return true
}
// match: (Load (Convert (Addr {s} sb) _) _)
// cond: isFixedLoad(v, s, 0)
// result: rewriteFixedLoad(v, s, sb, 0)
for {
if v_0.Op != OpConvert {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpAddr {
break
}
s := auxToSym(v_0_0.Aux)
sb := v_0_0.Args[0]
if !(isFixedLoad(v, s, 0)) {
break
}
v.copyOf(rewriteFixedLoad(v, s, sb, 0))
return true
}
// match: (Load (ITab (IMake (Addr {s} sb) _)) _)
// cond: isFixedLoad(v, s, 0)
// result: rewriteFixedLoad(v, s, sb, 0)
for {
if v_0.Op != OpITab {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpIMake {
break
}
v_0_0_0 := v_0_0.Args[0]
if v_0_0_0.Op != OpAddr {
break
}
s := auxToSym(v_0_0_0.Aux)
sb := v_0_0_0.Args[0]
if !(isFixedLoad(v, s, 0)) {
break
}
v.copyOf(rewriteFixedLoad(v, s, sb, 0))
return true
}
// match: (Load (ITab (IMake (Convert (Addr {s} sb) _) _)) _)
// cond: isFixedLoad(v, s, 0)
// result: rewriteFixedLoad(v, s, sb, 0)
for {
if v_0.Op != OpITab {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpIMake {
break
}
v_0_0_0 := v_0_0.Args[0]
if v_0_0_0.Op != OpConvert {
break
}
v_0_0_0_0 := v_0_0_0.Args[0]
if v_0_0_0_0.Op != OpAddr {
break
}
s := auxToSym(v_0_0_0_0.Aux)
sb := v_0_0_0_0.Args[0]
if !(isFixedLoad(v, s, 0)) {
break
}
v.copyOf(rewriteFixedLoad(v, s, sb, 0))
return true
}
// match: (Load (OffPtr [off] (Addr {s} sb) ) _)
// cond: isFixedLoad(v, s, off)
// result: rewriteFixedLoad(v, s, sb, off)
for {
if v_0.Op != OpOffPtr {
			break
		}
		off := auxIntToInt64(v_0.AuxInt)
@@ -14689,19 +14771,17 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
		}
		s := auxToSym(v_0_0.Aux)
		sb := v_0_0.Args[0]
-		if !(isFixedSym(s, off)) {
+		if !(isFixedLoad(v, s, off)) {
			break
		}
-		v.reset(OpAddr)
-		v.Aux = symToAux(fixedSym(b.Func, s, off))
-		v.AddArg(sb)
+		v.copyOf(rewriteFixedLoad(v, s, sb, off))
		return true
	}
-	// match: (Load <typ.BytePtr> (OffPtr [off] (Convert (Addr {s} sb) _) ) _)
-	// cond: isFixedSym(s, off)
-	// result: (Addr {fixedSym(b.Func, s, off)} sb)
+	// match: (Load (OffPtr [off] (Convert (Addr {s} sb) _) ) _)
+	// cond: isFixedLoad(v, s, off)
+	// result: rewriteFixedLoad(v, s, sb, off)
	for {
-		if v.Type != typ.BytePtr || v_0.Op != OpOffPtr {
+		if v_0.Op != OpOffPtr {
			break
		}
		off := auxIntToInt64(v_0.AuxInt)
@@ -14715,19 +14795,17 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
		}
		s := auxToSym(v_0_0_0.Aux)
		sb := v_0_0_0.Args[0]
-		if !(isFixedSym(s, off)) {
+		if !(isFixedLoad(v, s, off)) {
			break
		}
-		v.reset(OpAddr)
-		v.Aux = symToAux(fixedSym(b.Func, s, off))
-		v.AddArg(sb)
+		v.copyOf(rewriteFixedLoad(v, s, sb, off))
		return true
	}
-	// match: (Load <typ.BytePtr> (OffPtr [off] (ITab (IMake (Addr {s} sb) _))) _)
-	// cond: isFixedSym(s, off)
-	// result: (Addr {fixedSym(b.Func, s, off)} sb)
+	// match: (Load (OffPtr [off] (ITab (IMake (Addr {s} sb) _))) _)
+	// cond: isFixedLoad(v, s, off)
+	// result: rewriteFixedLoad(v, s, sb, off)
	for {
-		if v.Type != typ.BytePtr || v_0.Op != OpOffPtr {
+		if v_0.Op != OpOffPtr {
			break
		}
		off := auxIntToInt64(v_0.AuxInt)
@@ -14745,19 +14823,17 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
		}
		s := auxToSym(v_0_0_0_0.Aux)
		sb := v_0_0_0_0.Args[0]
-		if !(isFixedSym(s, off)) {
+		if !(isFixedLoad(v, s, off)) {
			break
		}
-		v.reset(OpAddr)
-		v.Aux = symToAux(fixedSym(b.Func, s, off))
-		v.AddArg(sb)
+		v.copyOf(rewriteFixedLoad(v, s, sb, off))
		return true
	}
-	// match: (Load <typ.BytePtr> (OffPtr [off] (ITab (IMake (Convert (Addr {s} sb) _) _))) _)
-	// cond: isFixedSym(s, off)
-	// result: (Addr {fixedSym(b.Func, s, off)} sb)
+	// match: (Load (OffPtr [off] (ITab (IMake (Convert (Addr {s} sb) _) _))) _)
+	// cond: isFixedLoad(v, s, off)
+	// result: rewriteFixedLoad(v, s, sb, off)
	for {
-		if v.Type != typ.BytePtr || v_0.Op != OpOffPtr {
+		if v_0.Op != OpOffPtr {
			break
		}
		off := auxIntToInt64(v_0.AuxInt)
@@ -14779,232 +14855,10 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
		}
		s := auxToSym(v_0_0_0_0_0.Aux)
		sb := v_0_0_0_0_0.Args[0]
-		if !(isFixedSym(s, off)) {
+		if !(isFixedLoad(v, s, off)) {
			break
		}
-		v.reset(OpAddr)
-		v.Aux = symToAux(fixedSym(b.Func, s, off))
-		v.AddArg(sb)
+		v.copyOf(rewriteFixedLoad(v, s, sb, off))
return true
}
// match: (Load <typ.Uintptr> (OffPtr [off] (Addr {s} sb) ) _)
// cond: isFixedSym(s, off)
// result: (Addr {fixedSym(b.Func, s, off)} sb)
for {
if v.Type != typ.Uintptr || v_0.Op != OpOffPtr {
break
}
off := auxIntToInt64(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpAddr {
break
}
s := auxToSym(v_0_0.Aux)
sb := v_0_0.Args[0]
if !(isFixedSym(s, off)) {
break
}
v.reset(OpAddr)
v.Aux = symToAux(fixedSym(b.Func, s, off))
v.AddArg(sb)
return true
}
// match: (Load <typ.Uintptr> (OffPtr [off] (Convert (Addr {s} sb) _) ) _)
// cond: isFixedSym(s, off)
// result: (Addr {fixedSym(b.Func, s, off)} sb)
for {
if v.Type != typ.Uintptr || v_0.Op != OpOffPtr {
break
}
off := auxIntToInt64(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpConvert {
break
}
v_0_0_0 := v_0_0.Args[0]
if v_0_0_0.Op != OpAddr {
break
}
s := auxToSym(v_0_0_0.Aux)
sb := v_0_0_0.Args[0]
if !(isFixedSym(s, off)) {
break
}
v.reset(OpAddr)
v.Aux = symToAux(fixedSym(b.Func, s, off))
v.AddArg(sb)
return true
}
// match: (Load <typ.Uintptr> (OffPtr [off] (ITab (IMake (Addr {s} sb) _))) _)
// cond: isFixedSym(s, off)
// result: (Addr {fixedSym(b.Func, s, off)} sb)
for {
if v.Type != typ.Uintptr || v_0.Op != OpOffPtr {
break
}
off := auxIntToInt64(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpITab {
break
}
v_0_0_0 := v_0_0.Args[0]
if v_0_0_0.Op != OpIMake {
break
}
v_0_0_0_0 := v_0_0_0.Args[0]
if v_0_0_0_0.Op != OpAddr {
break
}
s := auxToSym(v_0_0_0_0.Aux)
sb := v_0_0_0_0.Args[0]
if !(isFixedSym(s, off)) {
break
}
v.reset(OpAddr)
v.Aux = symToAux(fixedSym(b.Func, s, off))
v.AddArg(sb)
return true
}
// match: (Load <typ.Uintptr> (OffPtr [off] (ITab (IMake (Convert (Addr {s} sb) _) _))) _)
// cond: isFixedSym(s, off)
// result: (Addr {fixedSym(b.Func, s, off)} sb)
for {
if v.Type != typ.Uintptr || v_0.Op != OpOffPtr {
break
}
off := auxIntToInt64(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpITab {
break
}
v_0_0_0 := v_0_0.Args[0]
if v_0_0_0.Op != OpIMake {
break
}
v_0_0_0_0 := v_0_0_0.Args[0]
if v_0_0_0_0.Op != OpConvert {
break
}
v_0_0_0_0_0 := v_0_0_0_0.Args[0]
if v_0_0_0_0_0.Op != OpAddr {
break
}
s := auxToSym(v_0_0_0_0_0.Aux)
sb := v_0_0_0_0_0.Args[0]
if !(isFixedSym(s, off)) {
break
}
v.reset(OpAddr)
v.Aux = symToAux(fixedSym(b.Func, s, off))
v.AddArg(sb)
return true
}
// match: (Load <t> (OffPtr [off] (Addr {sym} _) ) _)
// cond: t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)
// result: (Const32 [fixed32(config, sym, off)])
for {
t := v.Type
if v_0.Op != OpOffPtr {
break
}
off := auxIntToInt64(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpAddr {
break
}
sym := auxToSym(v_0_0.Aux)
if !(t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)) {
break
}
v.reset(OpConst32)
v.AuxInt = int32ToAuxInt(fixed32(config, sym, off))
return true
}
// match: (Load <t> (OffPtr [off] (Convert (Addr {sym} _) _) ) _)
// cond: t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)
// result: (Const32 [fixed32(config, sym, off)])
for {
t := v.Type
if v_0.Op != OpOffPtr {
break
}
off := auxIntToInt64(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpConvert {
break
}
v_0_0_0 := v_0_0.Args[0]
if v_0_0_0.Op != OpAddr {
break
}
sym := auxToSym(v_0_0_0.Aux)
if !(t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)) {
break
}
v.reset(OpConst32)
v.AuxInt = int32ToAuxInt(fixed32(config, sym, off))
return true
}
// match: (Load <t> (OffPtr [off] (ITab (IMake (Addr {sym} _) _))) _)
// cond: t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)
// result: (Const32 [fixed32(config, sym, off)])
for {
t := v.Type
if v_0.Op != OpOffPtr {
break
}
off := auxIntToInt64(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpITab {
break
}
v_0_0_0 := v_0_0.Args[0]
if v_0_0_0.Op != OpIMake {
break
}
v_0_0_0_0 := v_0_0_0.Args[0]
if v_0_0_0_0.Op != OpAddr {
break
}
sym := auxToSym(v_0_0_0_0.Aux)
if !(t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)) {
break
}
v.reset(OpConst32)
v.AuxInt = int32ToAuxInt(fixed32(config, sym, off))
return true
}
// match: (Load <t> (OffPtr [off] (ITab (IMake (Convert (Addr {sym} _) _) _))) _)
// cond: t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)
// result: (Const32 [fixed32(config, sym, off)])
for {
t := v.Type
if v_0.Op != OpOffPtr {
break
}
off := auxIntToInt64(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpITab {
break
}
v_0_0_0 := v_0_0.Args[0]
if v_0_0_0.Op != OpIMake {
break
}
v_0_0_0_0 := v_0_0_0.Args[0]
if v_0_0_0_0.Op != OpConvert {
break
}
v_0_0_0_0_0 := v_0_0_0_0.Args[0]
if v_0_0_0_0_0.Op != OpAddr {
break
}
sym := auxToSym(v_0_0_0_0_0.Aux)
if !(t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)) {
break
}
v.reset(OpConst32)
v.AuxInt = int32ToAuxInt(fixed32(config, sym, off))
		return true
	}
	return false
@@ -31117,6 +30971,390 @@ func rewriteValuegeneric_OpStaticLECall(v *Value) bool {
		v.AddArg2(v0, mem)
		return true
	}
// match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [3]) mem)
// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)
// result: (MakeResult (Eq32 (Or32 <typ.Int32> (ZeroExt16to32 <typ.Int32> (Load <typ.Int16> sptr mem)) (Lsh32x32 <typ.Int32> (ZeroExt8to32 <typ.Int32> (Load <typ.Int8> (OffPtr <typ.BytePtr> [2] sptr) mem)) (Const32 <typ.Int32> [16]))) (Const32 <typ.Int32> [int32(uint32(read16(scon,0,config.ctxt.Arch.ByteOrder))|(uint32(read8(scon,2))<<16))])) mem)
for {
if len(v.Args) != 4 {
break
}
callAux := auxToCall(v.Aux)
mem := v.Args[3]
sptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAddr {
break
}
scon := auxToSym(v_1.Aux)
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpSB {
break
}
v_2 := v.Args[2]
if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 3 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)) {
break
}
v.reset(OpMakeResult)
v0 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
v1 := b.NewValue0(v.Pos, OpOr32, typ.Int32)
v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.Int32)
v3 := b.NewValue0(v.Pos, OpLoad, typ.Int16)
v3.AddArg2(sptr, mem)
v2.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpLsh32x32, typ.Int32)
v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.Int32)
v6 := b.NewValue0(v.Pos, OpLoad, typ.Int8)
v7 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtr)
v7.AuxInt = int64ToAuxInt(2)
v7.AddArg(sptr)
v6.AddArg2(v7, mem)
v5.AddArg(v6)
v8 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
v8.AuxInt = int32ToAuxInt(16)
v4.AddArg2(v5, v8)
v1.AddArg2(v2, v4)
v9 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
v9.AuxInt = int32ToAuxInt(int32(uint32(read16(scon, 0, config.ctxt.Arch.ByteOrder)) | (uint32(read8(scon, 2)) << 16)))
v0.AddArg2(v1, v9)
v.AddArg2(v0, mem)
return true
}
// match: (StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [3]) mem)
// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)
// result: (MakeResult (Eq32 (Or32 <typ.Int32> (ZeroExt16to32 <typ.Int32> (Load <typ.Int16> sptr mem)) (Lsh32x32 <typ.Int32> (ZeroExt8to32 <typ.Int32> (Load <typ.Int8> (OffPtr <typ.BytePtr> [2] sptr) mem)) (Const32 <typ.Int32> [16]))) (Const32 <typ.Int32> [int32(uint32(read16(scon,0,config.ctxt.Arch.ByteOrder))|(uint32(read8(scon,2))<<16))])) mem)
for {
if len(v.Args) != 4 {
break
}
callAux := auxToCall(v.Aux)
mem := v.Args[3]
v_0 := v.Args[0]
if v_0.Op != OpAddr {
break
}
scon := auxToSym(v_0.Aux)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpSB {
break
}
sptr := v.Args[1]
v_2 := v.Args[2]
if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 3 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)) {
break
}
v.reset(OpMakeResult)
v0 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
v1 := b.NewValue0(v.Pos, OpOr32, typ.Int32)
v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.Int32)
v3 := b.NewValue0(v.Pos, OpLoad, typ.Int16)
v3.AddArg2(sptr, mem)
v2.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpLsh32x32, typ.Int32)
v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.Int32)
v6 := b.NewValue0(v.Pos, OpLoad, typ.Int8)
v7 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtr)
v7.AuxInt = int64ToAuxInt(2)
v7.AddArg(sptr)
v6.AddArg2(v7, mem)
v5.AddArg(v6)
v8 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
v8.AuxInt = int32ToAuxInt(16)
v4.AddArg2(v5, v8)
v1.AddArg2(v2, v4)
v9 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
v9.AuxInt = int32ToAuxInt(int32(uint32(read16(scon, 0, config.ctxt.Arch.ByteOrder)) | (uint32(read8(scon, 2)) << 16)))
v0.AddArg2(v1, v9)
v.AddArg2(v0, mem)
return true
}
// match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [5]) mem)
// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8
// result: (MakeResult (Eq64 (Or64 <typ.Int64> (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem)) (Lsh64x64 <typ.Int64> (ZeroExt8to64 <typ.Int64> (Load <typ.Int8> (OffPtr <typ.BytePtr> [4] sptr) mem)) (Const64 <typ.Int64> [32]))) (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read8(scon,4))<<32))])) mem)
for {
if len(v.Args) != 4 {
break
}
callAux := auxToCall(v.Aux)
mem := v.Args[3]
sptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAddr {
break
}
scon := auxToSym(v_1.Aux)
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpSB {
break
}
v_2 := v.Args[2]
if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 5 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8) {
break
}
v.reset(OpMakeResult)
v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool)
v1 := b.NewValue0(v.Pos, OpOr64, typ.Int64)
v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.Int64)
v3 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
v3.AddArg2(sptr, mem)
v2.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpLsh64x64, typ.Int64)
v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.Int64)
v6 := b.NewValue0(v.Pos, OpLoad, typ.Int8)
v7 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtr)
v7.AuxInt = int64ToAuxInt(4)
v7.AddArg(sptr)
v6.AddArg2(v7, mem)
v5.AddArg(v6)
v8 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
v8.AuxInt = int64ToAuxInt(32)
v4.AddArg2(v5, v8)
v1.AddArg2(v2, v4)
v9 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
v9.AuxInt = int64ToAuxInt(int64(uint64(read32(scon, 0, config.ctxt.Arch.ByteOrder)) | (uint64(read8(scon, 4)) << 32)))
v0.AddArg2(v1, v9)
v.AddArg2(v0, mem)
return true
}
// match: (StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [5]) mem)
// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8
// result: (MakeResult (Eq64 (Or64 <typ.Int64> (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem)) (Lsh64x64 <typ.Int64> (ZeroExt8to64 <typ.Int64> (Load <typ.Int8> (OffPtr <typ.BytePtr> [4] sptr) mem)) (Const64 <typ.Int64> [32]))) (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read8(scon,4))<<32))])) mem)
for {
if len(v.Args) != 4 {
break
}
callAux := auxToCall(v.Aux)
mem := v.Args[3]
v_0 := v.Args[0]
if v_0.Op != OpAddr {
break
}
scon := auxToSym(v_0.Aux)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpSB {
break
}
sptr := v.Args[1]
v_2 := v.Args[2]
if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 5 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8) {
break
}
v.reset(OpMakeResult)
v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool)
v1 := b.NewValue0(v.Pos, OpOr64, typ.Int64)
v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.Int64)
v3 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
v3.AddArg2(sptr, mem)
v2.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpLsh64x64, typ.Int64)
v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.Int64)
v6 := b.NewValue0(v.Pos, OpLoad, typ.Int8)
v7 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtr)
v7.AuxInt = int64ToAuxInt(4)
v7.AddArg(sptr)
v6.AddArg2(v7, mem)
v5.AddArg(v6)
v8 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
v8.AuxInt = int64ToAuxInt(32)
v4.AddArg2(v5, v8)
v1.AddArg2(v2, v4)
v9 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
v9.AuxInt = int64ToAuxInt(int64(uint64(read32(scon, 0, config.ctxt.Arch.ByteOrder)) | (uint64(read8(scon, 4)) << 32)))
v0.AddArg2(v1, v9)
v.AddArg2(v0, mem)
return true
}
// match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [6]) mem)
// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8
// result: (MakeResult (Eq64 (Or64 <typ.Int64> (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem)) (Lsh64x64 <typ.Int64> (ZeroExt16to64 <typ.Int64> (Load <typ.Int16> (OffPtr <typ.BytePtr> [4] sptr) mem)) (Const64 <typ.Int64> [32]))) (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read16(scon,4,config.ctxt.Arch.ByteOrder))<<32))])) mem)
for {
if len(v.Args) != 4 {
break
}
callAux := auxToCall(v.Aux)
mem := v.Args[3]
sptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAddr {
break
}
scon := auxToSym(v_1.Aux)
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpSB {
break
}
v_2 := v.Args[2]
if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 6 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8) {
break
}
v.reset(OpMakeResult)
v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool)
v1 := b.NewValue0(v.Pos, OpOr64, typ.Int64)
v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.Int64)
v3 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
v3.AddArg2(sptr, mem)
v2.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpLsh64x64, typ.Int64)
v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.Int64)
v6 := b.NewValue0(v.Pos, OpLoad, typ.Int16)
v7 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtr)
v7.AuxInt = int64ToAuxInt(4)
v7.AddArg(sptr)
v6.AddArg2(v7, mem)
v5.AddArg(v6)
v8 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
v8.AuxInt = int64ToAuxInt(32)
v4.AddArg2(v5, v8)
v1.AddArg2(v2, v4)
v9 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
v9.AuxInt = int64ToAuxInt(int64(uint64(read32(scon, 0, config.ctxt.Arch.ByteOrder)) | (uint64(read16(scon, 4, config.ctxt.Arch.ByteOrder)) << 32)))
v0.AddArg2(v1, v9)
v.AddArg2(v0, mem)
return true
}
// match: (StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [6]) mem)
// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8
// result: (MakeResult (Eq64 (Or64 <typ.Int64> (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem)) (Lsh64x64 <typ.Int64> (ZeroExt16to64 <typ.Int64> (Load <typ.Int16> (OffPtr <typ.BytePtr> [4] sptr) mem)) (Const64 <typ.Int64> [32]))) (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read16(scon,4,config.ctxt.Arch.ByteOrder))<<32))])) mem)
for {
if len(v.Args) != 4 {
break
}
callAux := auxToCall(v.Aux)
mem := v.Args[3]
v_0 := v.Args[0]
if v_0.Op != OpAddr {
break
}
scon := auxToSym(v_0.Aux)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpSB {
break
}
sptr := v.Args[1]
v_2 := v.Args[2]
if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 6 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8) {
break
}
v.reset(OpMakeResult)
v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool)
v1 := b.NewValue0(v.Pos, OpOr64, typ.Int64)
v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.Int64)
v3 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
v3.AddArg2(sptr, mem)
v2.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpLsh64x64, typ.Int64)
v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.Int64)
v6 := b.NewValue0(v.Pos, OpLoad, typ.Int16)
v7 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtr)
v7.AuxInt = int64ToAuxInt(4)
v7.AddArg(sptr)
v6.AddArg2(v7, mem)
v5.AddArg(v6)
v8 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
v8.AuxInt = int64ToAuxInt(32)
v4.AddArg2(v5, v8)
v1.AddArg2(v2, v4)
v9 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
v9.AuxInt = int64ToAuxInt(int64(uint64(read32(scon, 0, config.ctxt.Arch.ByteOrder)) | (uint64(read16(scon, 4, config.ctxt.Arch.ByteOrder)) << 32)))
v0.AddArg2(v1, v9)
v.AddArg2(v0, mem)
return true
}
// match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [7]) mem)
// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8
// result: (MakeResult (Eq64 (Or64 <typ.Int64> (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem)) (Lsh64x64 <typ.Int64> (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> (OffPtr <typ.BytePtr> [3] sptr) mem)) (Const64 <typ.Int64> [32]))) (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read32(scon,3,config.ctxt.Arch.ByteOrder))<<32))])) mem)
for {
if len(v.Args) != 4 {
break
}
callAux := auxToCall(v.Aux)
mem := v.Args[3]
sptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAddr {
break
}
scon := auxToSym(v_1.Aux)
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpSB {
break
}
v_2 := v.Args[2]
if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 7 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8) {
break
}
v.reset(OpMakeResult)
v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool)
v1 := b.NewValue0(v.Pos, OpOr64, typ.Int64)
v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.Int64)
v3 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
v3.AddArg2(sptr, mem)
v2.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpLsh64x64, typ.Int64)
v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.Int64)
v6 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
v7 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtr)
v7.AuxInt = int64ToAuxInt(3)
v7.AddArg(sptr)
v6.AddArg2(v7, mem)
v5.AddArg(v6)
v8 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
v8.AuxInt = int64ToAuxInt(32)
v4.AddArg2(v5, v8)
v1.AddArg2(v2, v4)
v9 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
v9.AuxInt = int64ToAuxInt(int64(uint64(read32(scon, 0, config.ctxt.Arch.ByteOrder)) | (uint64(read32(scon, 3, config.ctxt.Arch.ByteOrder)) << 32)))
v0.AddArg2(v1, v9)
v.AddArg2(v0, mem)
return true
}
// match: (StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [7]) mem)
// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8
// result: (MakeResult (Eq64 (Or64 <typ.Int64> (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem)) (Lsh64x64 <typ.Int64> (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> (OffPtr <typ.BytePtr> [3] sptr) mem)) (Const64 <typ.Int64> [32]))) (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read32(scon,3,config.ctxt.Arch.ByteOrder))<<32))])) mem)
for {
if len(v.Args) != 4 {
break
}
callAux := auxToCall(v.Aux)
mem := v.Args[3]
v_0 := v.Args[0]
if v_0.Op != OpAddr {
break
}
scon := auxToSym(v_0.Aux)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpSB {
break
}
sptr := v.Args[1]
v_2 := v.Args[2]
if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 7 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8) {
break
}
v.reset(OpMakeResult)
v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool)
v1 := b.NewValue0(v.Pos, OpOr64, typ.Int64)
v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.Int64)
v3 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
v3.AddArg2(sptr, mem)
v2.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpLsh64x64, typ.Int64)
v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.Int64)
v6 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
v7 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtr)
v7.AuxInt = int64ToAuxInt(3)
v7.AddArg(sptr)
v6.AddArg2(v7, mem)
v5.AddArg(v6)
v8 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
v8.AuxInt = int64ToAuxInt(32)
v4.AddArg2(v5, v8)
v1.AddArg2(v2, v4)
v9 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
v9.AuxInt = int64ToAuxInt(int64(uint64(read32(scon, 0, config.ctxt.Arch.ByteOrder)) | (uint64(read32(scon, 3, config.ctxt.Arch.ByteOrder)) << 32)))
v0.AddArg2(v1, v9)
v.AddArg2(v0, mem)
return true
}
	// match: (StaticLECall {callAux} _ _ (Const64 [0]) mem)
	// cond: isSameCall(callAux, "runtime.memequal")
	// result: (MakeResult (ConstBool <typ.Bool> [true]) mem)
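The new length-3/5/6/7 rules above expand runtime.memequal against a read-only constant symbol into two wide loads OR'd into a single comparison. The 3-byte shape in plain Go, as a little-endian sketch (the generated rules consult config.ctxt.Arch.ByteOrder, so big-endian targets compose the constant side accordingly; eq3 is an illustrative name):

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	// eq3 mirrors the expansion: a 2-byte load plus a 1-byte load shifted into
	// the high bits, compared against the same composition of the other side.
	func eq3(p, q []byte) bool {
		x := uint32(binary.LittleEndian.Uint16(p)) | uint32(p[2])<<16
		y := uint32(binary.LittleEndian.Uint16(q)) | uint32(q[2])<<16
		return x == y
	}

	func main() {
		fmt.Println(eq3([]byte("abc"), []byte("abc"))) // true
		fmt.Println(eq3([]byte("abc"), []byte("abd"))) // false
	}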


@@ -144,6 +144,13 @@ func (v *Value) AuxArm64BitField() arm64BitField {
	return arm64BitField(v.AuxInt)
}
func (v *Value) AuxArm64ConditionalParams() arm64ConditionalParams {
if opcodeTable[v.Op].auxType != auxARM64ConditionalParams {
v.Fatalf("op %s doesn't have a ARM64ConditionalParams aux field", v.Op)
}
return auxIntToArm64ConditionalParams(v.AuxInt)
}
// long form print. v# = opcode <type> [aux] args [: reg] (names)
func (v *Value) LongString() string {
	if v == nil {
@@ -203,6 +210,15 @@ func (v *Value) auxString() string {
		lsb := v.AuxArm64BitField().lsb()
		width := v.AuxArm64BitField().width()
		return fmt.Sprintf(" [lsb=%d,width=%d]", lsb, width)
case auxARM64ConditionalParams:
params := v.AuxArm64ConditionalParams()
cond := params.Cond()
nzcv := params.Nzcv()
imm, ok := params.ConstValue()
if ok {
return fmt.Sprintf(" [cond=%s,nzcv=%d,imm=%d]", cond, nzcv, imm)
}
return fmt.Sprintf(" [cond=%s,nzcv=%d]", cond, nzcv)
	case auxFloat32, auxFloat64:
		return fmt.Sprintf(" [%g]", v.AuxFloat())
	case auxString:


@@ -233,6 +233,13 @@ func TestIntendedInlining(t *testing.T) {
		"testing": {
			"(*B).Loop",
		},
"path": {
"Base",
"scanChunk",
},
"path/filepath": {
"scanChunk",
},
	}
	if runtime.GOARCH != "386" && runtime.GOARCH != "loong64" && runtime.GOARCH != "mips64" && runtime.GOARCH != "mips64le" && runtime.GOARCH != "riscv64" {


@@ -162,12 +162,25 @@ func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
}
// resolve resolves the type parameters, methods, and underlying type of n.
-// This information may be loaded from a provided loader function, or computed
-// from an origin type (in the case of instances).
//
-// After resolution, the type parameters, methods, and underlying type of n are
-// accessible; but if n is an instantiated type, its methods may still be
-// unexpanded.
+// For the purposes of resolution, there are three categories of named types:
+// 1. Instantiated Types
+// 2. Lazy Loaded Types
+// 3. All Others
+//
+// Note that the above form a partition.
+//
+// Instantiated types:
+// Type parameters, methods, and underlying type of n become accessible,
+// though methods are lazily populated as needed.
+//
+// Lazy loaded types:
+// Type parameters, methods, and underlying type of n become accessible
+// and are fully expanded.
+//
+// All others:
+// Effectively, nothing happens. The underlying type of n may still be
+// a named type.
func (n *Named) resolve() *Named {
	if n.state() > unresolved { // avoid locking below
		return n
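The same contract is visible through the exported go/types API (types2's public mirror). A minimal sketch of the third bucket — an ordinary named type built directly, with no loader and no instantiation, for which resolution is effectively a no-op:

	package main

	import (
		"fmt"
		"go/token"
		"go/types"
	)

	func main() {
		// An "all others" named type: constructed with NewNamed, never
		// lazy-loaded or instantiated, so there is nothing to expand.
		pkg := types.NewPackage("example.com/p", "p")
		obj := types.NewTypeName(token.NoPos, pkg, "Celsius", nil)
		named := types.NewNamed(obj, types.Typ[types.Float64], nil)
		fmt.Println(named, named.Underlying()) // example.com/p.Celsius float64
	}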


@@ -804,7 +804,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
	}
	case ssa.Op386LoweredPanicBoundsCR:
		yIsReg = true
-		yVal := int(v.Args[0].Reg() - x86.REG_AX)
+		yVal = int(v.Args[0].Reg() - x86.REG_AX)
		c := v.Aux.(ssa.PanicBoundsC).C
		if c >= 0 && c <= abi.BoundsMaxConst {
			xVal = int(c)


@@ -49,6 +49,7 @@ var bootstrapDirs = []string{
	"cmd/internal/macho",
	"cmd/internal/obj/...",
	"cmd/internal/objabi",
+	"cmd/internal/par",
	"cmd/internal/pgo",
	"cmd/internal/pkgpath",
	"cmd/internal/quoted",


@@ -1213,7 +1213,7 @@ func (t *tester) internalLinkPIE() bool {
	case "darwin-amd64", "darwin-arm64",
		"linux-amd64", "linux-arm64", "linux-loong64", "linux-ppc64le",
		"android-arm64",
-		"windows-amd64", "windows-386", "windows-arm":
+		"windows-amd64", "windows-386":
		return true
	}
	return false


@@ -6,9 +6,6 @@
package main
import (
	"go/ast"
-	"go/token"
-	"reflect"
-	"strings"
)
func init() {
@@ -18,130 +15,11 @@
var cftypeFix = fix{
	name:     "cftype",
	date:     "2017-09-27",
-	f:        cftypefix,
-	desc:     `Fixes initializers and casts of C.*Ref and JNI types`,
+	f:        noop,
+	desc:     `Fixes initializers and casts of C.*Ref and JNI types (removed)`,
	disabled: false,
}
+func noop(f *ast.File) bool {
+	return false
+}
// Old state:
//
// type CFTypeRef unsafe.Pointer
//
// New state:
//
// type CFTypeRef uintptr
//
// and similar for other *Ref types.
// This fix finds nils initializing these types and replaces the nils with 0s.
func cftypefix(f *ast.File) bool {
return typefix(f, func(s string) bool {
return strings.HasPrefix(s, "C.") && strings.HasSuffix(s, "Ref") && s != "C.CFAllocatorRef"
})
}
// typefix replaces nil with 0 for all nils whose type, when passed to badType, returns true.
func typefix(f *ast.File, badType func(string) bool) bool {
if !imports(f, "C") {
return false
}
typeof, _ := typecheck(&TypeConfig{}, f)
changed := false
// step 1: Find all the nils with the offending types.
// Compute their replacement.
badNils := map[any]ast.Expr{}
walk(f, func(n any) {
if i, ok := n.(*ast.Ident); ok && i.Name == "nil" && badType(typeof[n]) {
badNils[n] = &ast.BasicLit{ValuePos: i.NamePos, Kind: token.INT, Value: "0"}
}
})
// step 2: find all uses of the bad nils, replace them with 0.
// There's no easy way to map from an ast.Expr to all the places that use them, so
// we use reflect to find all such references.
if len(badNils) > 0 {
exprType := reflect.TypeFor[ast.Expr]()
exprSliceType := reflect.TypeFor[[]ast.Expr]()
walk(f, func(n any) {
if n == nil {
return
}
v := reflect.ValueOf(n)
if v.Kind() != reflect.Pointer {
return
}
if v.IsNil() {
return
}
v = v.Elem()
if v.Kind() != reflect.Struct {
return
}
for i := 0; i < v.NumField(); i++ {
f := v.Field(i)
if f.Type() == exprType {
if r := badNils[f.Interface()]; r != nil {
f.Set(reflect.ValueOf(r))
changed = true
}
}
if f.Type() == exprSliceType {
for j := 0; j < f.Len(); j++ {
e := f.Index(j)
if r := badNils[e.Interface()]; r != nil {
e.Set(reflect.ValueOf(r))
changed = true
}
}
}
}
})
}
// step 3: fix up invalid casts.
// It used to be ok to cast between *unsafe.Pointer and *C.CFTypeRef in a single step.
// Now we need unsafe.Pointer as an intermediate cast.
// (*unsafe.Pointer)(x) where x is type *bad -> (*unsafe.Pointer)(unsafe.Pointer(x))
// (*bad.type)(x) where x is type *unsafe.Pointer -> (*bad.type)(unsafe.Pointer(x))
walk(f, func(n any) {
if n == nil {
return
}
// Find pattern like (*a.b)(x)
c, ok := n.(*ast.CallExpr)
if !ok {
return
}
if len(c.Args) != 1 {
return
}
p, ok := c.Fun.(*ast.ParenExpr)
if !ok {
return
}
s, ok := p.X.(*ast.StarExpr)
if !ok {
return
}
t, ok := s.X.(*ast.SelectorExpr)
if !ok {
return
}
pkg, ok := t.X.(*ast.Ident)
if !ok {
return
}
dst := pkg.Name + "." + t.Sel.Name
src := typeof[c.Args[0]]
if badType(dst) && src == "*unsafe.Pointer" ||
dst == "unsafe.Pointer" && strings.HasPrefix(src, "*") && badType(src[1:]) {
c.Args[0] = &ast.CallExpr{
Fun: &ast.SelectorExpr{X: &ast.Ident{Name: "unsafe"}, Sel: &ast.Ident{Name: "Pointer"}},
Args: []ast.Expr{c.Args[0]},
}
changed = true
}
})
return changed
}


@@ -1,241 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
func init() {
addTestCases(cftypeTests, cftypefix)
}
var cftypeTests = []testCase{
{
Name: "cftype.localVariable",
In: `package main
// typedef const void *CFTypeRef;
import "C"
func f() {
var x C.CFTypeRef = nil
x = nil
x, x = nil, nil
}
`,
Out: `package main
// typedef const void *CFTypeRef;
import "C"
func f() {
var x C.CFTypeRef = 0
x = 0
x, x = 0, 0
}
`,
},
{
Name: "cftype.globalVariable",
In: `package main
// typedef const void *CFTypeRef;
import "C"
var x C.CFTypeRef = nil
func f() {
x = nil
}
`,
Out: `package main
// typedef const void *CFTypeRef;
import "C"
var x C.CFTypeRef = 0
func f() {
x = 0
}
`,
},
{
Name: "cftype.EqualArgument",
In: `package main
// typedef const void *CFTypeRef;
import "C"
var x C.CFTypeRef
var y = x == nil
var z = x != nil
`,
Out: `package main
// typedef const void *CFTypeRef;
import "C"
var x C.CFTypeRef
var y = x == 0
var z = x != 0
`,
},
{
Name: "cftype.StructField",
In: `package main
// typedef const void *CFTypeRef;
import "C"
type T struct {
x C.CFTypeRef
}
var t = T{x: nil}
`,
Out: `package main
// typedef const void *CFTypeRef;
import "C"
type T struct {
x C.CFTypeRef
}
var t = T{x: 0}
`,
},
{
Name: "cftype.FunctionArgument",
In: `package main
// typedef const void *CFTypeRef;
import "C"
func f(x C.CFTypeRef) {
}
func g() {
f(nil)
}
`,
Out: `package main
// typedef const void *CFTypeRef;
import "C"
func f(x C.CFTypeRef) {
}
func g() {
f(0)
}
`,
},
{
Name: "cftype.ArrayElement",
In: `package main
// typedef const void *CFTypeRef;
import "C"
var x = [3]C.CFTypeRef{nil, nil, nil}
`,
Out: `package main
// typedef const void *CFTypeRef;
import "C"
var x = [3]C.CFTypeRef{0, 0, 0}
`,
},
{
Name: "cftype.SliceElement",
In: `package main
// typedef const void *CFTypeRef;
import "C"
var x = []C.CFTypeRef{nil, nil, nil}
`,
Out: `package main
// typedef const void *CFTypeRef;
import "C"
var x = []C.CFTypeRef{0, 0, 0}
`,
},
{
Name: "cftype.MapKey",
In: `package main
// typedef const void *CFTypeRef;
import "C"
var x = map[C.CFTypeRef]int{nil: 0}
`,
Out: `package main
// typedef const void *CFTypeRef;
import "C"
var x = map[C.CFTypeRef]int{0: 0}
`,
},
{
Name: "cftype.MapValue",
In: `package main
// typedef const void *CFTypeRef;
import "C"
var x = map[int]C.CFTypeRef{0: nil}
`,
Out: `package main
// typedef const void *CFTypeRef;
import "C"
var x = map[int]C.CFTypeRef{0: 0}
`,
},
{
Name: "cftype.Conversion1",
In: `package main
// typedef const void *CFTypeRef;
import "C"
var x C.CFTypeRef
var y = (*unsafe.Pointer)(&x)
`,
Out: `package main
// typedef const void *CFTypeRef;
import "C"
var x C.CFTypeRef
var y = (*unsafe.Pointer)(unsafe.Pointer(&x))
`,
},
{
Name: "cftype.Conversion2",
In: `package main
// typedef const void *CFTypeRef;
import "C"
var x unsafe.Pointer
var y = (*C.CFTypeRef)(&x)
`,
Out: `package main
// typedef const void *CFTypeRef;
import "C"
var x unsafe.Pointer
var y = (*C.CFTypeRef)(unsafe.Pointer(&x))
`,
},
}


@@ -4,10 +4,6 @@
package main
-import (
-	"go/ast"
-)
func init() {
	register(contextFix)
}
@@ -15,11 +11,7 @@ func init() {
var contextFix = fix{
	name:     "context",
	date:     "2016-09-09",
-	f:        ctxfix,
-	desc:     `Change imports of golang.org/x/net/context to context`,
+	f:        noop,
+	desc:     `Change imports of golang.org/x/net/context to context (removed)`,
	disabled: false,
}
func ctxfix(f *ast.File) bool {
return rewriteImport(f, "golang.org/x/net/context", "context")
}


@@ -1,42 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
func init() {
addTestCases(contextTests, ctxfix)
}
var contextTests = []testCase{
{
Name: "context.0",
In: `package main
import "golang.org/x/net/context"
var _ = "golang.org/x/net/context"
`,
Out: `package main
import "context"
var _ = "golang.org/x/net/context"
`,
},
{
Name: "context.1",
In: `package main
import ctx "golang.org/x/net/context"
var _ = ctx.Background()
`,
Out: `package main
import ctx "context"
var _ = ctx.Background()
`,
},
}


@@ -4,10 +4,6 @@
package main
-import (
-	"go/ast"
-)
func init() {
	register(eglFixDisplay)
	register(eglFixConfig)
@@ -16,45 +12,15 @@ func init() {
var eglFixDisplay = fix{
	name:     "egl",
	date:     "2018-12-15",
-	f:        eglfixDisp,
-	desc:     `Fixes initializers of EGLDisplay`,
+	f:        noop,
+	desc:     `Fixes initializers of EGLDisplay (removed)`,
	disabled: false,
}
// Old state:
//
// type EGLDisplay unsafe.Pointer
//
// New state:
//
// type EGLDisplay uintptr
//
// This fix finds nils initializing these types and replaces the nils with 0s.
func eglfixDisp(f *ast.File) bool {
return typefix(f, func(s string) bool {
return s == "C.EGLDisplay"
})
}
var eglFixConfig = fix{
	name:     "eglconf",
	date:     "2020-05-30",
-	f:        eglfixConfig,
-	desc:     `Fixes initializers of EGLConfig`,
+	f:        noop,
+	desc:     `Fixes initializers of EGLConfig (removed)`,
	disabled: false,
}
// Old state:
//
// type EGLConfig unsafe.Pointer
//
// New state:
//
// type EGLConfig uintptr
//
// This fix finds nils initializing these types and replaces the nils with 0s.
func eglfixConfig(f *ast.File) bool {
return typefix(f, func(s string) bool {
return s == "C.EGLConfig"
})
}


@@ -1,214 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import "strings"
func init() {
addTestCases(eglTestsFor("EGLDisplay"), eglfixDisp)
addTestCases(eglTestsFor("EGLConfig"), eglfixConfig)
}
func eglTestsFor(tname string) []testCase {
var eglTests = []testCase{
{
Name: "egl.localVariable",
In: `package main
// typedef void *$EGLTYPE;
import "C"
func f() {
var x C.$EGLTYPE = nil
x = nil
x, x = nil, nil
}
`,
Out: `package main
// typedef void *$EGLTYPE;
import "C"
func f() {
var x C.$EGLTYPE = 0
x = 0
x, x = 0, 0
}
`,
},
{
Name: "egl.globalVariable",
In: `package main
// typedef void *$EGLTYPE;
import "C"
var x C.$EGLTYPE = nil
func f() {
x = nil
}
`,
Out: `package main
// typedef void *$EGLTYPE;
import "C"
var x C.$EGLTYPE = 0
func f() {
x = 0
}
`,
},
{
Name: "egl.EqualArgument",
In: `package main
// typedef void *$EGLTYPE;
import "C"
var x C.$EGLTYPE
var y = x == nil
var z = x != nil
`,
Out: `package main
// typedef void *$EGLTYPE;
import "C"
var x C.$EGLTYPE
var y = x == 0
var z = x != 0
`,
},
{
Name: "egl.StructField",
In: `package main
// typedef void *$EGLTYPE;
import "C"
type T struct {
x C.$EGLTYPE
}
var t = T{x: nil}
`,
Out: `package main
// typedef void *$EGLTYPE;
import "C"
type T struct {
x C.$EGLTYPE
}
var t = T{x: 0}
`,
},
{
Name: "egl.FunctionArgument",
In: `package main
// typedef void *$EGLTYPE;
import "C"
func f(x C.$EGLTYPE) {
}
func g() {
f(nil)
}
`,
Out: `package main
// typedef void *$EGLTYPE;
import "C"
func f(x C.$EGLTYPE) {
}
func g() {
f(0)
}
`,
},
{
Name: "egl.ArrayElement",
In: `package main
// typedef void *$EGLTYPE;
import "C"
var x = [3]C.$EGLTYPE{nil, nil, nil}
`,
Out: `package main
// typedef void *$EGLTYPE;
import "C"
var x = [3]C.$EGLTYPE{0, 0, 0}
`,
},
{
Name: "egl.SliceElement",
In: `package main
// typedef void *$EGLTYPE;
import "C"
var x = []C.$EGLTYPE{nil, nil, nil}
`,
Out: `package main
// typedef void *$EGLTYPE;
import "C"
var x = []C.$EGLTYPE{0, 0, 0}
`,
},
{
Name: "egl.MapKey",
In: `package main
// typedef void *$EGLTYPE;
import "C"
var x = map[C.$EGLTYPE]int{nil: 0}
`,
Out: `package main
// typedef void *$EGLTYPE;
import "C"
var x = map[C.$EGLTYPE]int{0: 0}
`,
},
{
Name: "egl.MapValue",
In: `package main
// typedef void *$EGLTYPE;
import "C"
var x = map[int]C.$EGLTYPE{0: nil}
`,
Out: `package main
// typedef void *$EGLTYPE;
import "C"
var x = map[int]C.$EGLTYPE{0: 0}
`,
},
}
for i := range eglTests {
t := &eglTests[i]
t.In = strings.ReplaceAll(t.In, "$EGLTYPE", tname)
t.Out = strings.ReplaceAll(t.Out, "$EGLTYPE", tname)
}
return eglTests
}


@@ -4,11 +4,6 @@
package main
-import (
-	"go/ast"
-	"strconv"
-)
func init() {
	register(gotypesFix)
}
@@ -16,60 +11,6 @@ func init() {
var gotypesFix = fix{
	name: "gotypes",
	date: "2015-07-16",
-	f:    gotypes,
-	desc: `Change imports of golang.org/x/tools/go/{exact,types} to go/{constant,types}`,
+	f:    noop,
+	desc: `Change imports of golang.org/x/tools/go/{exact,types} to go/{constant,types} (removed)`,
}
func gotypes(f *ast.File) bool {
fixed := fixGoTypes(f)
if fixGoExact(f) {
fixed = true
}
return fixed
}
func fixGoTypes(f *ast.File) bool {
return rewriteImport(f, "golang.org/x/tools/go/types", "go/types")
}
func fixGoExact(f *ast.File) bool {
// This one is harder because the import name changes.
// First find the import spec.
var importSpec *ast.ImportSpec
walk(f, func(n any) {
if importSpec != nil {
return
}
spec, ok := n.(*ast.ImportSpec)
if !ok {
return
}
path, err := strconv.Unquote(spec.Path.Value)
if err != nil {
return
}
if path == "golang.org/x/tools/go/exact" {
importSpec = spec
}
})
if importSpec == nil {
return false
}
// We are about to rename exact.* to constant.*, but constant is a common
// name. See if it will conflict. This is a hack but it is effective.
exists := renameTop(f, "constant", "constant")
suffix := ""
if exists {
suffix = "_"
}
// Now we need to rename all the uses of the import. RewriteImport
// affects renameTop, but not vice versa, so do them in this order.
renameTop(f, "exact", "constant"+suffix)
rewriteImport(f, "golang.org/x/tools/go/exact", "go/constant")
// renameTop will also rewrite the imported package name. Fix that;
// we know it should be missing.
importSpec.Name = nil
return true
} }


@@ -1,89 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
func init() {
addTestCases(gotypesTests, gotypes)
}
var gotypesTests = []testCase{
{
Name: "gotypes.0",
In: `package main
import "golang.org/x/tools/go/types"
import "golang.org/x/tools/go/exact"
var _ = exact.Kind
func f() {
_ = exact.MakeBool(true)
}
`,
Out: `package main
import "go/types"
import "go/constant"
var _ = constant.Kind
func f() {
_ = constant.MakeBool(true)
}
`,
},
{
Name: "gotypes.1",
In: `package main
import "golang.org/x/tools/go/types"
import foo "golang.org/x/tools/go/exact"
var _ = foo.Kind
func f() {
_ = foo.MakeBool(true)
}
`,
Out: `package main
import "go/types"
import "go/constant"
var _ = foo.Kind
func f() {
_ = foo.MakeBool(true)
}
`,
},
{
Name: "gotypes.0",
In: `package main
import "golang.org/x/tools/go/types"
import "golang.org/x/tools/go/exact"
var _ = exact.Kind
var constant = 23 // Use of new package name.
func f() {
_ = exact.MakeBool(true)
}
`,
Out: `package main
import "go/types"
import "go/constant"
var _ = constant_.Kind
var constant = 23 // Use of new package name.
func f() {
_ = constant_.MakeBool(true)
}
`,
},
}

View file

@ -4,10 +4,6 @@
package main package main
import (
"go/ast"
)
func init() { func init() {
register(jniFix) register(jniFix)
} }
@ -15,55 +11,7 @@ func init() {
var jniFix = fix{ var jniFix = fix{
name: "jni", name: "jni",
date: "2017-12-04", date: "2017-12-04",
f: jnifix, f: noop,
desc: `Fixes initializers of JNI's jobject and subtypes`, desc: `Fixes initializers of JNI's jobject and subtypes (removed)`,
disabled: false, disabled: false,
} }
// Old state:
//
// type jobject *_jobject
//
// New state:
//
// type jobject uintptr
//
// and similar for subtypes of jobject.
// This fix finds nils initializing these types and replaces the nils with 0s.
func jnifix(f *ast.File) bool {
return typefix(f, func(s string) bool {
switch s {
case "C.jobject":
return true
case "C.jclass":
return true
case "C.jthrowable":
return true
case "C.jstring":
return true
case "C.jarray":
return true
case "C.jbooleanArray":
return true
case "C.jbyteArray":
return true
case "C.jcharArray":
return true
case "C.jshortArray":
return true
case "C.jintArray":
return true
case "C.jlongArray":
return true
case "C.jfloatArray":
return true
case "C.jdoubleArray":
return true
case "C.jobjectArray":
return true
case "C.jweak":
return true
}
return false
})
}

View file

@ -1,203 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
func init() {
addTestCases(jniTests, jnifix)
}
var jniTests = []testCase{
{
Name: "jni.localVariable",
In: `package main
// typedef struct _jobject* jobject;
import "C"
func f() {
var x C.jobject = nil
x = nil
x, x = nil, nil
}
`,
Out: `package main
// typedef struct _jobject* jobject;
import "C"
func f() {
var x C.jobject = 0
x = 0
x, x = 0, 0
}
`,
},
{
Name: "jni.globalVariable",
In: `package main
// typedef struct _jobject* jobject;
import "C"
var x C.jobject = nil
func f() {
x = nil
}
`,
Out: `package main
// typedef struct _jobject* jobject;
import "C"
var x C.jobject = 0
func f() {
x = 0
}
`,
},
{
Name: "jni.EqualArgument",
In: `package main
// typedef struct _jobject* jobject;
import "C"
var x C.jobject
var y = x == nil
var z = x != nil
`,
Out: `package main
// typedef struct _jobject* jobject;
import "C"
var x C.jobject
var y = x == 0
var z = x != 0
`,
},
{
Name: "jni.StructField",
In: `package main
// typedef struct _jobject* jobject;
import "C"
type T struct {
x C.jobject
}
var t = T{x: nil}
`,
Out: `package main
// typedef struct _jobject* jobject;
import "C"
type T struct {
x C.jobject
}
var t = T{x: 0}
`,
},
{
Name: "jni.FunctionArgument",
In: `package main
// typedef struct _jobject* jobject;
import "C"
func f(x C.jobject) {
}
func g() {
f(nil)
}
`,
Out: `package main
// typedef struct _jobject* jobject;
import "C"
func f(x C.jobject) {
}
func g() {
f(0)
}
`,
},
{
Name: "jni.ArrayElement",
In: `package main
// typedef struct _jobject* jobject;
import "C"
var x = [3]C.jobject{nil, nil, nil}
`,
Out: `package main
// typedef struct _jobject* jobject;
import "C"
var x = [3]C.jobject{0, 0, 0}
`,
},
{
Name: "jni.SliceElement",
In: `package main
// typedef struct _jobject* jobject;
import "C"
var x = []C.jobject{nil, nil, nil}
`,
Out: `package main
// typedef struct _jobject* jobject;
import "C"
var x = []C.jobject{0, 0, 0}
`,
},
{
Name: "jni.MapKey",
In: `package main
// typedef struct _jobject* jobject;
import "C"
var x = map[C.jobject]int{nil: 0}
`,
Out: `package main
// typedef struct _jobject* jobject;
import "C"
var x = map[C.jobject]int{0: 0}
`,
},
{
Name: "jni.MapValue",
In: `package main
// typedef struct _jobject* jobject;
import "C"
var x = map[int]C.jobject{0: nil}
`,
Out: `package main
// typedef struct _jobject* jobject;
import "C"
var x = map[int]C.jobject{0: 0}
`,
},
}

View file

@ -4,11 +4,6 @@
package main package main
import (
"go/ast"
"slices"
)
func init() { func init() {
register(netipv6zoneFix) register(netipv6zoneFix)
} }
@ -16,56 +11,9 @@ func init() {
var netipv6zoneFix = fix{ var netipv6zoneFix = fix{
name: "netipv6zone", name: "netipv6zone",
date: "2012-11-26", date: "2012-11-26",
f: netipv6zone, f: noop,
desc: `Adapt element key to IPAddr, UDPAddr or TCPAddr composite literals. desc: `Adapt element key to IPAddr, UDPAddr or TCPAddr composite literals (removed).
https://codereview.appspot.com/6849045/ https://codereview.appspot.com/6849045/
`, `,
} }
func netipv6zone(f *ast.File) bool {
if !imports(f, "net") {
return false
}
fixed := false
walk(f, func(n any) {
cl, ok := n.(*ast.CompositeLit)
if !ok {
return
}
se, ok := cl.Type.(*ast.SelectorExpr)
if !ok {
return
}
if !isTopName(se.X, "net") || se.Sel == nil {
return
}
switch ss := se.Sel.String(); ss {
case "IPAddr", "UDPAddr", "TCPAddr":
for i, e := range cl.Elts {
if _, ok := e.(*ast.KeyValueExpr); ok {
break
}
switch i {
case 0:
cl.Elts[i] = &ast.KeyValueExpr{
Key: ast.NewIdent("IP"),
Value: e,
}
case 1:
if elit, ok := e.(*ast.BasicLit); ok && elit.Value == "0" {
cl.Elts = slices.Delete(cl.Elts, i, i+1)
} else {
cl.Elts[i] = &ast.KeyValueExpr{
Key: ast.NewIdent("Port"),
Value: e,
}
}
}
fixed = true
}
}
})
return fixed
}

View file

@ -1,43 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
func init() {
addTestCases(netipv6zoneTests, netipv6zone)
}
var netipv6zoneTests = []testCase{
{
Name: "netipv6zone.0",
In: `package main
import "net"
func f() net.Addr {
a := &net.IPAddr{ip1}
sub(&net.UDPAddr{ip2, 12345})
c := &net.TCPAddr{IP: ip3, Port: 54321}
d := &net.TCPAddr{ip4, 0}
p := 1234
e := &net.TCPAddr{ip4, p}
return &net.TCPAddr{ip5}, nil
}
`,
Out: `package main
import "net"
func f() net.Addr {
a := &net.IPAddr{IP: ip1}
sub(&net.UDPAddr{IP: ip2, Port: 12345})
c := &net.TCPAddr{IP: ip3, Port: 54321}
d := &net.TCPAddr{IP: ip4}
p := 1234
e := &net.TCPAddr{IP: ip4, Port: p}
return &net.TCPAddr{IP: ip5}, nil
}
`,
},
}

View file

@ -4,8 +4,6 @@
package main package main
import "go/ast"
func init() { func init() {
register(printerconfigFix) register(printerconfigFix)
} }
@ -13,49 +11,6 @@ func init() {
var printerconfigFix = fix{ var printerconfigFix = fix{
name: "printerconfig", name: "printerconfig",
date: "2012-12-11", date: "2012-12-11",
f: printerconfig, f: noop,
desc: `Add element keys to Config composite literals.`, desc: `Add element keys to Config composite literals (removed).`,
}
func printerconfig(f *ast.File) bool {
if !imports(f, "go/printer") {
return false
}
fixed := false
walk(f, func(n any) {
cl, ok := n.(*ast.CompositeLit)
if !ok {
return
}
se, ok := cl.Type.(*ast.SelectorExpr)
if !ok {
return
}
if !isTopName(se.X, "printer") || se.Sel == nil {
return
}
if ss := se.Sel.String(); ss == "Config" {
for i, e := range cl.Elts {
if _, ok := e.(*ast.KeyValueExpr); ok {
break
}
switch i {
case 0:
cl.Elts[i] = &ast.KeyValueExpr{
Key: ast.NewIdent("Mode"),
Value: e,
}
case 1:
cl.Elts[i] = &ast.KeyValueExpr{
Key: ast.NewIdent("Tabwidth"),
Value: e,
}
}
fixed = true
}
}
})
return fixed
} }

View file

@ -1,37 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
func init() {
addTestCases(printerconfigTests, printerconfig)
}
var printerconfigTests = []testCase{
{
Name: "printerconfig.0",
In: `package main
import "go/printer"
func f() printer.Config {
b := printer.Config{0, 8}
c := &printer.Config{0}
d := &printer.Config{Tabwidth: 8, Mode: 0}
return printer.Config{0, 8}
}
`,
Out: `package main
import "go/printer"
func f() printer.Config {
b := printer.Config{Mode: 0, Tabwidth: 8}
c := &printer.Config{Mode: 0}
d := &printer.Config{Tabwidth: 8, Mode: 0}
return printer.Config{Mode: 0, Tabwidth: 8}
}
`,
},
}

View file

@ -11,7 +11,7 @@ require (
golang.org/x/sys v0.35.0 golang.org/x/sys v0.35.0
golang.org/x/telemetry v0.0.0-20250807160809-1a19826ec488 golang.org/x/telemetry v0.0.0-20250807160809-1a19826ec488
golang.org/x/term v0.34.0 golang.org/x/term v0.34.0
golang.org/x/tools v0.36.1-0.20250808220315-8866876b956f golang.org/x/tools v0.36.1-0.20250904192731-a09a2fba1c08
) )
require ( require (

View file

@ -22,7 +22,7 @@ golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
golang.org/x/tools v0.36.1-0.20250808220315-8866876b956f h1:9m2Iptt9ZZU5llKDJy1XUl5d13PN1ZYV16KwOvE6jOw= golang.org/x/tools v0.36.1-0.20250904192731-a09a2fba1c08 h1:KS/PXsrK6W9NdlNu8iuCiNb7KM8UFwsh8g1BUjJ9rww=
golang.org/x/tools v0.36.1-0.20250808220315-8866876b956f/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= golang.org/x/tools v0.36.1-0.20250904192731-a09a2fba1c08/go.mod h1:n+8pplxVZfXnmHBxWsfPnQRJ5vWroQDk+U2MFpjwtFY=
rsc.io/markdown v0.0.0-20240306144322-0bf8f97ee8ef h1:mqLYrXCXYEZOop9/Dbo6RPX11539nwiCNBb1icVPmw8= rsc.io/markdown v0.0.0-20240306144322-0bf8f97ee8ef h1:mqLYrXCXYEZOop9/Dbo6RPX11539nwiCNBb1icVPmw8=
rsc.io/markdown v0.0.0-20240306144322-0bf8f97ee8ef/go.mod h1:8xcPgWmwlZONN1D9bjxtHEjrUtSEa3fakVF8iaewYKQ= rsc.io/markdown v0.0.0-20240306144322-0bf8f97ee8ef/go.mod h1:8xcPgWmwlZONN1D9bjxtHEjrUtSEa3fakVF8iaewYKQ=

View file

@ -988,6 +988,15 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) {
if len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 && cfg.BuildCoverPkg == nil { if len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 && cfg.BuildCoverPkg == nil {
p.Internal.Cover.GenMeta = true p.Internal.Cover.GenMeta = true
} }
// Set coverage mode before building actions because it needs to be set
// before the first package build action for the package under test is
// created and cached, so that we can create the coverage action for it.
if cfg.BuildCover {
if p.Internal.Cover.GenMeta {
p.Internal.Cover.Mode = cfg.BuildCoverMode
}
}
} }
} }
@ -1116,11 +1125,6 @@ var windowsBadWords = []string{
func builderTest(b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts, p *load.Package, imported bool, writeCoverMetaAct *work.Action) (buildAction, runAction, printAction *work.Action, perr *load.Package, err error) { func builderTest(b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts, p *load.Package, imported bool, writeCoverMetaAct *work.Action) (buildAction, runAction, printAction *work.Action, perr *load.Package, err error) {
if len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 { if len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 {
if cfg.BuildCover {
if p.Internal.Cover.GenMeta {
p.Internal.Cover.Mode = cfg.BuildCoverMode
}
}
build := b.CompileAction(work.ModeBuild, work.ModeBuild, p) build := b.CompileAction(work.ModeBuild, work.ModeBuild, p)
run := &work.Action{ run := &work.Action{
Mode: "test run", Mode: "test run",
@ -1188,7 +1192,9 @@ func builderTest(b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts,
testBinary := testBinaryName(p) testBinary := testBinaryName(p)
testDir := b.NewObjdir() // Set testdir to compile action's objdir.
// so that the default file path stripping applies to _testmain.go.
testDir := b.CompileAction(work.ModeBuild, work.ModeBuild, pmain).Objdir
if err := b.BackgroundShell().Mkdir(testDir); err != nil { if err := b.BackgroundShell().Mkdir(testDir); err != nil {
return nil, nil, nil, nil, err return nil, nil, nil, nil, err
} }
@ -1209,10 +1215,6 @@ func builderTest(b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts,
} }
} }
// Set compile objdir to testDir we've already created,
// so that the default file path stripping applies to _testmain.go.
b.CompileAction(work.ModeBuild, work.ModeBuild, pmain).Objdir = testDir
a := b.LinkAction(work.ModeBuild, work.ModeBuild, pmain) a := b.LinkAction(work.ModeBuild, work.ModeBuild, pmain)
a.Target = testDir + testBinary + cfg.ExeSuffix a.Target = testDir + testBinary + cfg.ExeSuffix
if cfg.Goos == "windows" { if cfg.Goos == "windows" {

View file

@ -88,6 +88,8 @@ type Action struct {
TestOutput *bytes.Buffer // test output buffer TestOutput *bytes.Buffer // test output buffer
Args []string // additional args for runProgram Args []string // additional args for runProgram
Provider any // Additional information to be passed to successive actions. Similar to a Bazel provider.
triggers []*Action // inverse of deps triggers []*Action // inverse of deps
buggyInstall bool // is this a buggy install (see -linkshared)? buggyInstall bool // is this a buggy install (see -linkshared)?
@ -448,26 +450,9 @@ func (b *Builder) AutoAction(mode, depMode BuildMode, p *load.Package) *Action {
} }
// buildActor implements the Actor interface for package build // buildActor implements the Actor interface for package build
// actions. For most package builds this simply means invoking th // actions. For most package builds this simply means invoking the
// *Builder.build method; in the case of "go test -cover" for // *Builder.build method.
// a package with no test files, we stores some additional state type buildActor struct{}
// information in the build actor to help with reporting.
type buildActor struct {
// name of static meta-data file fragment emitted by the cover
// tool as part of the package build action, for selected
// "go test -cover" runs.
covMetaFileName string
}
// newBuildActor returns a new buildActor object, setting up the
// covMetaFileName field if 'genCoverMeta' flag is set.
func newBuildActor(p *load.Package, genCoverMeta bool) *buildActor {
ba := &buildActor{}
if genCoverMeta {
ba.covMetaFileName = covcmd.MetaFileForPackage(p.ImportPath)
}
return ba
}
func (ba *buildActor) Act(b *Builder, ctx context.Context, a *Action) error { func (ba *buildActor) Act(b *Builder, ctx context.Context, a *Action) error {
return b.build(ctx, a) return b.build(ctx, a)
@ -536,6 +521,63 @@ func (p *pgoActor) Act(b *Builder, ctx context.Context, a *Action) error {
return nil return nil
} }
type checkCacheProvider struct {
need uint32 // What work do successive actions within this package's build need to do? Combination of need bits used in build actions.
}
// checkCacheActor checks the cache to determine what work needs to be done for
// the action. It sets the need bits depending on the build mode and on what's
// available in the cache, so the cover and compile actions know what to do.
// Currently, we don't cache the outputs of the individual actions composing the
// build for a single package (such as the output of the cover actor) separately
// from the output of the final build. If we start doing so, we could schedule
// the run-cgo and cgo-compile actions earlier, because they wouldn't depend on
// the builds of the dependencies of the package they belong to.
type checkCacheActor struct {
covMetaFileName string
buildAction *Action
}
func (cca *checkCacheActor) Act(b *Builder, ctx context.Context, a *Action) error {
buildAction := cca.buildAction
if buildAction.Mode == "build-install" {
// (*Builder).installAction can rewrite the build action with its install action,
// making the true build action its dependency. Fetch the build action in that case.
buildAction = buildAction.Deps[0]
}
pr, err := b.checkCacheForBuild(a, buildAction, cca.covMetaFileName)
if err != nil {
return err
}
a.Provider = pr
return nil
}
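For illustration, a downstream actor recovers a provider by scanning its dependencies for the concrete type, as the build and cover actors below do; a minimal sketch of that lookup, assuming the Action and checkCacheProvider types above (findProvider is a hypothetical helper, not part of this change):

// findProvider returns the first *checkCacheProvider published by a
// dependency of a, or nil if no dependency set one.
func findProvider(a *Action) *checkCacheProvider {
	for _, dep := range a.Deps {
		if pr, ok := dep.Provider.(*checkCacheProvider); ok {
			return pr
		}
	}
	return nil
}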
type coverProvider struct {
goSources, cgoSources []string // The go and cgo sources generated by the cover tool, which should be used instead of the raw sources on the package.
}
// coverActor runs the cover tool to produce instrumented source files for
// cover builds. In the case of a package with no test files, we store some
// additional state information in the cover actor to help with reporting.
type coverActor struct {
// name of static meta-data file fragment emitted by the cover
// tool as part of the package cover action, for selected
// "go test -cover" runs.
covMetaFileName string
buildAction *Action
}
func (ca *coverActor) Act(b *Builder, ctx context.Context, a *Action) error {
pr, err := b.runCover(a, ca.buildAction, a.Objdir, a.Package.GoFiles, a.Package.CgoFiles)
if err != nil {
return err
}
a.Provider = pr
return nil
}
// CompileAction returns the action for compiling and possibly installing // CompileAction returns the action for compiling and possibly installing
// (according to mode) the given package. The resulting action is only // (according to mode) the given package. The resulting action is only
// for building packages (archives), never for linking executables. // for building packages (archives), never for linking executables.
@ -559,7 +601,7 @@ func (b *Builder) CompileAction(mode, depMode BuildMode, p *load.Package) *Actio
a := &Action{ a := &Action{
Mode: "build", Mode: "build",
Package: p, Package: p,
Actor: newBuildActor(p, p.Internal.Cover.GenMeta), Actor: &buildActor{},
Objdir: b.NewObjdir(), Objdir: b.NewObjdir(),
} }
@ -602,6 +644,39 @@ func (b *Builder) CompileAction(mode, depMode BuildMode, p *load.Package) *Actio
} }
} }
// Determine the covmeta file name.
var covMetaFileName string
if p.Internal.Cover.GenMeta {
covMetaFileName = covcmd.MetaFileForPackage(p.ImportPath)
}
// Create a cache action.
cacheAction := &Action{
Mode: "build check cache",
Package: p,
Actor: &checkCacheActor{buildAction: a, covMetaFileName: covMetaFileName},
Objdir: a.Objdir,
Deps: a.Deps, // Need outputs of dependency build actions to generate action id.
}
a.Deps = append(a.Deps, cacheAction)
// Create a cover action if we need to instrument the code for coverage.
// The cover action always runs in the same go build invocation as the build,
// and is not cached separately, so it can use the same objdir.
var coverAction *Action
if p.Internal.Cover.Mode != "" {
coverAction = b.cacheAction("cover", p, func() *Action {
return &Action{
Mode: "cover",
Package: p,
Actor: &coverActor{buildAction: a, covMetaFileName: covMetaFileName},
Objdir: a.Objdir,
Deps: []*Action{cacheAction},
}
})
a.Deps = append(a.Deps, coverAction)
}
return a return a
}) })

View file

@ -473,10 +473,12 @@ const (
needStale needStale
) )
// build is the action for building a single package. // checkCacheForBuild checks the cache for the outputs of the buildAction to determine
// Note that any new influence on this logic must be reported in b.buildActionID above as well. // what work needs to be done by it and the actions preceding it. a is the action
func (b *Builder) build(ctx context.Context, a *Action) (err error) { // currently being run, which has an actor of type *checkCacheActor and is a dependency
p := a.Package // of the buildAction.
func (b *Builder) checkCacheForBuild(a, buildAction *Action, covMetaFileName string) (_ *checkCacheProvider, err error) {
p := buildAction.Package
sh := b.Shell(a) sh := b.Shell(a)
bit := func(x uint32, b bool) uint32 { bit := func(x uint32, b bool) uint32 {
@ -488,28 +490,31 @@ func (b *Builder) build(ctx context.Context, a *Action) (err error) {
cachedBuild := false cachedBuild := false
needCovMeta := p.Internal.Cover.GenMeta needCovMeta := p.Internal.Cover.GenMeta
need := bit(needBuild, !b.IsCmdList && a.needBuild || b.NeedExport) | need := bit(needBuild, !b.IsCmdList && buildAction.needBuild || b.NeedExport) |
bit(needCgoHdr, b.needCgoHdr(a)) | bit(needCgoHdr, b.needCgoHdr(buildAction)) |
bit(needVet, a.needVet) | bit(needVet, buildAction.needVet) |
bit(needCovMetaFile, needCovMeta) | bit(needCovMetaFile, needCovMeta) |
bit(needCompiledGoFiles, b.NeedCompiledGoFiles) bit(needCompiledGoFiles, b.NeedCompiledGoFiles)
if !p.BinaryOnly { if !p.BinaryOnly {
if b.useCache(a, b.buildActionID(a), p.Target, need&needBuild != 0) { // We pass 'a' (this checkCacheAction) to buildActionID so that we use its dependencies,
// which are the actual package dependencies, rather than the buildAction's dependencies
// which also include this action and the cover action.
if b.useCache(buildAction, b.buildActionID(a), p.Target, need&needBuild != 0) {
// We found the main output in the cache. // We found the main output in the cache.
// If we don't need any other outputs, we can stop. // If we don't need any other outputs, we can stop.
// Otherwise, we need to write files to a.Objdir (needVet, needCgoHdr). // Otherwise, we need to write files to a.Objdir (needVet, needCgoHdr).
// Remember that we might have them in cache // Remember that we might have them in cache
// and check again after we create a.Objdir. // and check again after we create a.Objdir.
cachedBuild = true cachedBuild = true
a.output = []byte{} // start saving output in case we miss any cache results buildAction.output = []byte{} // start saving output in case we miss any cache results
need &^= needBuild need &^= needBuild
if b.NeedExport { if b.NeedExport {
p.Export = a.built p.Export = buildAction.built
p.BuildID = a.buildID p.BuildID = buildAction.buildID
} }
if need&needCompiledGoFiles != 0 { if need&needCompiledGoFiles != 0 {
if err := b.loadCachedCompiledGoFiles(a); err == nil { if err := b.loadCachedCompiledGoFiles(buildAction); err == nil {
need &^= needCompiledGoFiles need &^= needCompiledGoFiles
} }
} }
@ -518,13 +523,13 @@ func (b *Builder) build(ctx context.Context, a *Action) (err error) {
// Source files might be cached, even if the full action is not // Source files might be cached, even if the full action is not
// (e.g., go list -compiled -find). // (e.g., go list -compiled -find).
if !cachedBuild && need&needCompiledGoFiles != 0 { if !cachedBuild && need&needCompiledGoFiles != 0 {
if err := b.loadCachedCompiledGoFiles(a); err == nil { if err := b.loadCachedCompiledGoFiles(buildAction); err == nil {
need &^= needCompiledGoFiles need &^= needCompiledGoFiles
} }
} }
if need == 0 { if need == 0 {
return nil return &checkCacheProvider{need: need}, nil
} }
defer b.flushOutput(a) defer b.flushOutput(a)
} }
@ -534,6 +539,175 @@ func (b *Builder) build(ctx context.Context, a *Action) (err error) {
p.Error = &load.PackageError{Err: err} p.Error = &load.PackageError{Err: err}
} }
}() }()
if p.Error != nil {
// Don't try to build anything for packages with errors. There may be a
// problem with the inputs that makes the package unsafe to build.
return nil, p.Error
}
// TODO(matloob): return early for binary-only packages so that we don't need to indent
// the core of this function in the if !p.BinaryOnly block above.
if p.BinaryOnly {
p.Stale = true
p.StaleReason = "binary-only packages are no longer supported"
if b.IsCmdList {
return &checkCacheProvider{need: 0}, nil
}
return nil, errors.New("binary-only packages are no longer supported")
}
if p.Module != nil && !allowedVersion(p.Module.GoVersion) {
return nil, errors.New("module requires Go " + p.Module.GoVersion + " or later")
}
if err := b.checkDirectives(buildAction); err != nil {
return nil, err
}
if err := sh.Mkdir(buildAction.Objdir); err != nil {
return nil, err
}
// Load cached cgo header, but only if we're skipping the main build (cachedBuild==true).
if cachedBuild && need&needCgoHdr != 0 {
if err := b.loadCachedCgoHdr(buildAction); err == nil {
need &^= needCgoHdr
}
}
// Load cached coverage meta-data file fragment, but only if we're
// skipping the main build (cachedBuild==true).
if cachedBuild && need&needCovMetaFile != 0 {
if err := b.loadCachedObjdirFile(buildAction, cache.Default(), covMetaFileName); err == nil {
need &^= needCovMetaFile
}
}
// Load cached vet config, but only if that's all we have left
// (need == needVet, not testing just the one bit).
// If we are going to do a full build, we're going to
// regenerate the files in the build action anyway.
if need == needVet {
if err := b.loadCachedVet(buildAction); err == nil {
need &^= needVet
}
}
return &checkCacheProvider{need: need}, nil
}
func (b *Builder) runCover(a, buildAction *Action, objdir string, gofiles, cgofiles []string) (*coverProvider, error) {
p := a.Package
sh := b.Shell(a)
var cacheProvider *checkCacheProvider
for _, dep := range a.Deps {
if pr, ok := dep.Provider.(*checkCacheProvider); ok {
cacheProvider = pr
}
}
if cacheProvider == nil {
base.Fatalf("internal error: could not find checkCacheProvider")
}
need := cacheProvider.need
if need == 0 {
return nil, nil
}
if err := sh.Mkdir(a.Objdir); err != nil {
return nil, err
}
gofiles = slices.Clone(gofiles)
cgofiles = slices.Clone(cgofiles)
outfiles := []string{}
infiles := []string{}
for i, file := range str.StringList(gofiles, cgofiles) {
if base.IsTestFile(file) {
continue // Not covering this file.
}
var sourceFile string
var coverFile string
if base, found := strings.CutSuffix(file, ".cgo1.go"); found {
// cgo files have absolute paths
base = filepath.Base(base)
sourceFile = file
coverFile = objdir + base + ".cgo1.go"
} else {
sourceFile = filepath.Join(p.Dir, file)
coverFile = objdir + file
}
coverFile = strings.TrimSuffix(coverFile, ".go") + ".cover.go"
infiles = append(infiles, sourceFile)
outfiles = append(outfiles, coverFile)
if i < len(gofiles) {
gofiles[i] = coverFile
} else {
cgofiles[i-len(gofiles)] = coverFile
}
}
if len(infiles) != 0 {
// Coverage instrumentation creates new top level
// variables in the target package for things like
// meta-data containers, counter vars, etc. To avoid
// collisions with user variables, suffix the var name
// with 12 hex digits from the SHA-256 hash of the
// import path. Choice of 12 digits is historical/arbitrary,
// we just need enough of the hash to avoid accidents,
// as opposed to precluding determined attempts by
// users to break things.
sum := sha256.Sum256([]byte(a.Package.ImportPath))
coverVar := fmt.Sprintf("goCover_%x_", sum[:6])
mode := a.Package.Internal.Cover.Mode
if mode == "" {
panic("covermode should be set at this point")
}
if newoutfiles, err := b.cover(a, infiles, outfiles, coverVar, mode); err != nil {
return nil, err
} else {
outfiles = newoutfiles
gofiles = append([]string{newoutfiles[0]}, gofiles...)
}
if ca, ok := a.Actor.(*coverActor); ok && ca.covMetaFileName != "" {
b.cacheObjdirFile(buildAction, cache.Default(), ca.covMetaFileName)
}
}
return &coverProvider{gofiles, cgofiles}, nil
}
// build is the action for building a single package.
// Note that any new influence on this logic must be reported in b.buildActionID above as well.
func (b *Builder) build(ctx context.Context, a *Action) (err error) {
p := a.Package
sh := b.Shell(a)
var cacheProvider *checkCacheProvider
var coverPr *coverProvider
for _, dep := range a.Deps {
switch pr := dep.Provider.(type) {
case *coverProvider:
coverPr = pr
case *checkCacheProvider:
cacheProvider = pr
}
}
if cacheProvider == nil {
base.Fatalf("internal error: could not find checkCacheProvider")
}
need := cacheProvider.need
need &^= needCovMetaFile // handled by cover action
if need == 0 {
return
}
defer b.flushOutput(a)
if cfg.BuildN { if cfg.BuildN {
// In -n mode, print a banner between packages. // In -n mode, print a banner between packages.
// The banner is five lines so that when changes to // The banner is five lines so that when changes to
@ -547,63 +721,8 @@ func (b *Builder) build(ctx context.Context, a *Action) (err error) {
sh.Printf("%s\n", p.ImportPath) sh.Printf("%s\n", p.ImportPath)
} }
if p.Error != nil {
// Don't try to build anything for packages with errors. There may be a
// problem with the inputs that makes the package unsafe to build.
return p.Error
}
if p.BinaryOnly {
p.Stale = true
p.StaleReason = "binary-only packages are no longer supported"
if b.IsCmdList {
return nil
}
return errors.New("binary-only packages are no longer supported")
}
if p.Module != nil && !allowedVersion(p.Module.GoVersion) {
return errors.New("module requires Go " + p.Module.GoVersion + " or later")
}
if err := b.checkDirectives(a); err != nil {
return err
}
if err := sh.Mkdir(a.Objdir); err != nil {
return err
}
objdir := a.Objdir objdir := a.Objdir
// Load cached cgo header, but only if we're skipping the main build (cachedBuild==true).
if cachedBuild && need&needCgoHdr != 0 {
if err := b.loadCachedCgoHdr(a); err == nil {
need &^= needCgoHdr
}
}
// Load cached coverage meta-data file fragment, but only if we're
// skipping the main build (cachedBuild==true).
if cachedBuild && need&needCovMetaFile != 0 {
bact := a.Actor.(*buildActor)
if err := b.loadCachedObjdirFile(a, cache.Default(), bact.covMetaFileName); err == nil {
need &^= needCovMetaFile
}
}
// Load cached vet config, but only if that's all we have left
// (need == needVet, not testing just the one bit).
// If we are going to do a full build anyway,
// we're going to regenerate the files below anyway.
if need == needVet {
if err := b.loadCachedVet(a); err == nil {
need &^= needVet
}
}
if need == 0 {
return nil
}
if err := AllowInstall(a); err != nil { if err := AllowInstall(a); err != nil {
return err return err
} }
@ -658,60 +777,8 @@ OverlayLoop:
// If we're doing coverage, preprocess the .go files and put them in the work directory // If we're doing coverage, preprocess the .go files and put them in the work directory
if p.Internal.Cover.Mode != "" { if p.Internal.Cover.Mode != "" {
outfiles := []string{} gofiles = coverPr.goSources
infiles := []string{} cgofiles = coverPr.cgoSources
for i, file := range str.StringList(gofiles, cgofiles) {
if base.IsTestFile(file) {
continue // Not covering this file.
}
var sourceFile string
var coverFile string
if base, found := strings.CutSuffix(file, ".cgo1.go"); found {
// cgo files have absolute paths
base = filepath.Base(base)
sourceFile = file
coverFile = objdir + base + ".cgo1.go"
} else {
sourceFile = filepath.Join(p.Dir, file)
coverFile = objdir + file
}
coverFile = strings.TrimSuffix(coverFile, ".go") + ".cover.go"
infiles = append(infiles, sourceFile)
outfiles = append(outfiles, coverFile)
if i < len(gofiles) {
gofiles[i] = coverFile
} else {
cgofiles[i-len(gofiles)] = coverFile
}
}
if len(infiles) != 0 {
// Coverage instrumentation creates new top level
// variables in the target package for things like
// meta-data containers, counter vars, etc. To avoid
// collisions with user variables, suffix the var name
// with 12 hex digits from the SHA-256 hash of the
// import path. Choice of 12 digits is historical/arbitrary,
// we just need enough of the hash to avoid accidents,
// as opposed to precluding determined attempts by
// users to break things.
sum := sha256.Sum256([]byte(a.Package.ImportPath))
coverVar := fmt.Sprintf("goCover_%x_", sum[:6])
mode := a.Package.Internal.Cover.Mode
if mode == "" {
panic("covermode should be set at this point")
}
if newoutfiles, err := b.cover(a, infiles, outfiles, coverVar, mode); err != nil {
return err
} else {
outfiles = newoutfiles
gofiles = append([]string{newoutfiles[0]}, gofiles...)
}
if ba, ok := a.Actor.(*buildActor); ok && ba.covMetaFileName != "" {
b.cacheObjdirFile(a, cache.Default(), ba.covMetaFileName)
}
}
} }
// Run SWIG on each .swig and .swigcxx file. // Run SWIG on each .swig and .swigcxx file.
@ -1209,7 +1276,7 @@ func buildVetConfig(a *Action, srcfiles []string) {
for _, a1 := range a.Deps { for _, a1 := range a.Deps {
p1 := a1.Package p1 := a1.Package
if p1 == nil || p1.ImportPath == "" { if p1 == nil || p1.ImportPath == "" || p1 == a.Package {
continue continue
} }
// Add import mapping if needed // Add import mapping if needed
@ -1951,8 +2018,8 @@ func (b *Builder) writeCoverPkgInputs(a *Action, pconfigfile string, covoutputsf
OutConfig: p.Internal.Cover.Cfg, OutConfig: p.Internal.Cover.Cfg,
Local: p.Internal.Local, Local: p.Internal.Local,
} }
if ba, ok := a.Actor.(*buildActor); ok && ba.covMetaFileName != "" { if ca, ok := a.Actor.(*coverActor); ok && ca.covMetaFileName != "" {
pcfg.EmitMetaFile = a.Objdir + ba.covMetaFileName pcfg.EmitMetaFile = a.Objdir + ca.covMetaFileName
} }
if a.Package.Module != nil { if a.Package.Module != nil {
pcfg.ModulePath = a.Package.Module.Path pcfg.ModulePath = a.Package.Module.Path

View file

@ -132,11 +132,47 @@ func (sh *Shell) moveOrCopyFile(dst, src string, perm fs.FileMode, force bool) e
return sh.CopyFile(dst, src, perm, force) return sh.CopyFile(dst, src, perm, force)
} }
if err := sh.move(src, dst, perm); err == nil { // On Windows, always copy the file, so that we respect the NTFS
if cfg.BuildX { // permissions of the parent folder. https://golang.org/issue/22343.
sh.ShowCmd("", "mv %s %s", src, dst) // What matters here is not cfg.Goos (the system we are building
// for) but runtime.GOOS (the system we are building on).
if runtime.GOOS == "windows" {
return sh.CopyFile(dst, src, perm, force)
}
// If the destination directory has the group sticky bit set,
// we have to copy the file to retain the correct permissions.
// https://golang.org/issue/18878
if fi, err := os.Stat(filepath.Dir(dst)); err == nil {
if fi.IsDir() && (fi.Mode()&fs.ModeSetgid) != 0 {
return sh.CopyFile(dst, src, perm, force)
}
}
// The perm argument is meant to be adjusted according to umask,
// but we don't know what the umask is.
// Create a dummy file to find out.
// This avoids build tags and works even on systems like Plan 9
// where the file mask computation incorporates other information.
mode := perm
f, err := os.OpenFile(filepath.Clean(dst)+"-go-tmp-umask", os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm)
if err == nil {
fi, err := f.Stat()
if err == nil {
mode = fi.Mode() & 0777
}
name := f.Name()
f.Close()
os.Remove(name)
}
if err := os.Chmod(src, mode); err == nil {
if err := os.Rename(src, dst); err == nil {
if cfg.BuildX {
sh.ShowCmd("", "mv %s %s", src, dst)
}
return nil
} }
return nil
} }
return sh.CopyFile(dst, src, perm, force) return sh.CopyFile(dst, src, perm, force)
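The umask probe above deserves a note: rather than reading the umask (process-global state that can't be queried portably without changing it), the code creates a throwaway file next to the destination and inspects the mode it actually received. A standalone sketch of the same trick using only the standard library, with effectiveMode as a hypothetical name:

import (
	"io/fs"
	"os"
	"path/filepath"
)

// effectiveMode reports the permission bits a newly created file would
// actually receive for the requested perm, by creating and removing a
// probe file next to dst. This works even on systems like Plan 9, where
// the file mode computation folds in information beyond a simple umask.
func effectiveMode(dst string, perm fs.FileMode) fs.FileMode {
	mode := perm
	f, err := os.OpenFile(filepath.Clean(dst)+"-go-tmp-umask", os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm)
	if err == nil {
		if fi, statErr := f.Stat(); statErr == nil {
			mode = fi.Mode() & 0777
		}
		name := f.Name()
		f.Close()
		os.Remove(name)
	}
	return mode
}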

View file

@ -1,49 +0,0 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !windows
package work
import (
"errors"
"io/fs"
"os"
"path/filepath"
)
// move moves a file from src to dst setting the permissions
// on the destination file to inherit the permissions from the
// destination parent directory.
func (sh *Shell) move(src, dst string, perm fs.FileMode) error {
// If the destination directory has the group sticky bit set,
// we have to copy the file to retain the correct permissions.
// https://golang.org/issue/18878
if fi, err := os.Stat(filepath.Dir(dst)); err == nil {
if fi.IsDir() && (fi.Mode()&fs.ModeSetgid) != 0 {
return errors.ErrUnsupported
}
}
// The perm argument is meant to be adjusted according to umask,
// but we don't know what the umask is.
// Create a dummy file to find out.
// This works even on systems like Plan 9 where the
// file mask computation incorporates other information.
mode := perm
f, err := os.OpenFile(filepath.Clean(dst)+"-go-tmp-umask", os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm)
if err == nil {
fi, err := f.Stat()
if err == nil {
mode = fi.Mode() & 0777
}
name := f.Name()
f.Close()
os.Remove(name)
}
if err := os.Chmod(src, mode); err != nil {
return err
}
return os.Rename(src, dst)
}

View file

@ -1,37 +0,0 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package work
import (
"internal/syscall/windows"
"io/fs"
"os"
"unsafe"
)
// move moves a file from src to dst, setting the security information
// on the destination file to inherit the permissions from the
// destination parent directory.
func (sh *Shell) move(src, dst string, perm fs.FileMode) (err error) {
if err := os.Rename(src, dst); err != nil {
return err
}
defer func() {
if err != nil {
os.Remove(dst) // clean up if we failed to set the mode or security info
}
}()
if err := os.Chmod(dst, perm); err != nil {
return err
}
// We need to respect the ACL permissions of the destination parent folder.
// https://go.dev/issue/22343.
var acl windows.ACL
if err := windows.InitializeAcl(&acl, uint32(unsafe.Sizeof(acl)), windows.ACL_REVISION); err != nil {
return err
}
secInfo := windows.DACL_SECURITY_INFORMATION | windows.UNPROTECTED_DACL_SECURITY_INFORMATION
return windows.SetNamedSecurityInfo(dst, windows.SE_FILE_OBJECT, secInfo, nil, nil, &acl, nil)
}

View file

@ -1,12 +0,0 @@
golang.org/toolchain@v0.0.1-go1.999testmod.windows-arm
-- .mod --
module golang.org/toolchain
-- .info --
{"Version":"v0.0.1-go1.999testmod.windows-arm"}
-- go.mod --
module golang.org/toolchain
-- bin/go.bat --
@echo go1.999testmod here!
-- pkg/tool/fake --
-- lib/wasm/go_js_wasm_exec --

View file

@ -225,8 +225,6 @@ const (
REGZERO = REG_R0 // set to zero REGZERO = REG_R0 // set to zero
REGLINK = REG_R1 REGLINK = REG_R1
REGSP = REG_R3 REGSP = REG_R3
REGRT1 = REG_R20 // reserved for runtime, duffzero and duffcopy
REGRT2 = REG_R21 // reserved for runtime, duffcopy
REGCTXT = REG_R29 // context for closures REGCTXT = REG_R29 // context for closures
REGG = REG_R22 // G in loong64 REGG = REG_R22 // G in loong64
REGTMP = REG_R30 // used by the assembler REGTMP = REG_R30 // used by the assembler
@ -567,6 +565,9 @@ const (
AMOVVF AMOVVF
AMOVVD AMOVVD
// 2.2.1.2
AADDV16
// 2.2.1.3 // 2.2.1.3
AALSLW AALSLW
AALSLWU AALSLWU

View file

@ -125,6 +125,7 @@ var Anames = []string{
"MOVDV", "MOVDV",
"MOVVF", "MOVVF",
"MOVVD", "MOVVD",
"ADDV16",
"ALSLW", "ALSLW",
"ALSLWU", "ALSLWU",
"ALSLV", "ALSLV",

View file

@ -267,6 +267,9 @@ var optab = []Optab{
{AADDV, C_U12CON, C_REG, C_NONE, C_REG, C_NONE, 10, 8, 0, 0}, {AADDV, C_U12CON, C_REG, C_NONE, C_REG, C_NONE, 10, 8, 0, 0},
{AADDV, C_U12CON, C_NONE, C_NONE, C_REG, C_NONE, 10, 8, 0, 0}, {AADDV, C_U12CON, C_NONE, C_NONE, C_REG, C_NONE, 10, 8, 0, 0},
{AADDV16, C_32CON, C_REG, C_NONE, C_REG, C_NONE, 4, 4, 0, 0},
{AADDV16, C_32CON, C_NONE, C_NONE, C_REG, C_NONE, 4, 4, 0, 0},
{AAND, C_UU12CON, C_REG, C_NONE, C_REG, C_NONE, 4, 4, 0, 0}, {AAND, C_UU12CON, C_REG, C_NONE, C_REG, C_NONE, 4, 4, 0, 0},
{AAND, C_UU12CON, C_NONE, C_NONE, C_REG, C_NONE, 4, 4, 0, 0}, {AAND, C_UU12CON, C_NONE, C_NONE, C_REG, C_NONE, 4, 4, 0, 0},
{AAND, C_S12CON, C_REG, C_NONE, C_REG, C_NONE, 10, 8, 0, 0}, {AAND, C_S12CON, C_REG, C_NONE, C_REG, C_NONE, 10, 8, 0, 0},
@ -440,8 +443,6 @@ var optab = []Optab{
{obj.ANOP, C_DCON, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0}, // nop variants, see #40689 {obj.ANOP, C_DCON, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0}, // nop variants, see #40689
{obj.ANOP, C_REG, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0}, {obj.ANOP, C_REG, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0},
{obj.ANOP, C_FREG, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0}, {obj.ANOP, C_FREG, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0},
{obj.ADUFFZERO, C_NONE, C_NONE, C_NONE, C_BRAN, C_NONE, 11, 4, 0, 0}, // same as AJMP
{obj.ADUFFCOPY, C_NONE, C_NONE, C_NONE, C_BRAN, C_NONE, 11, 4, 0, 0}, // same as AJMP
} }
var atomicInst = map[obj.As]uint32{ var atomicInst = map[obj.As]uint32{
@ -1522,13 +1523,12 @@ func buildop(ctxt *obj.Link) {
APRELD, APRELD,
APRELDX, APRELDX,
AFSEL, AFSEL,
AADDV16,
obj.ANOP, obj.ANOP,
obj.ATEXT, obj.ATEXT,
obj.AFUNCDATA, obj.AFUNCDATA,
obj.APCALIGN, obj.APCALIGN,
obj.APCDATA, obj.APCDATA:
obj.ADUFFZERO,
obj.ADUFFCOPY:
break break
case ARDTIMELW: case ARDTIMELW:
@ -1983,6 +1983,18 @@ func OP_12IRR(op uint32, i uint32, r2 uint32, r3 uint32) uint32 {
return op | (i&0xFFF)<<10 | (r2&0x1F)<<5 | (r3&0x1F)<<0 return op | (i&0xFFF)<<10 | (r2&0x1F)<<5 | (r3&0x1F)<<0
} }
func OP_11IRR(op uint32, i uint32, r2 uint32, r3 uint32) uint32 {
return op | (i&0x7FF)<<10 | (r2&0x1F)<<5 | (r3&0x1F)<<0
}
func OP_10IRR(op uint32, i uint32, r2 uint32, r3 uint32) uint32 {
return op | (i&0x3FF)<<10 | (r2&0x1F)<<5 | (r3&0x1F)<<0
}
func OP_9IRR(op uint32, i uint32, r2 uint32, r3 uint32) uint32 {
return op | (i&0x1FF)<<10 | (r2&0x1F)<<5 | (r3&0x1F)<<0
}
func OP_8IRR(op uint32, i uint32, r2 uint32, r3 uint32) uint32 { func OP_8IRR(op uint32, i uint32, r2 uint32, r3 uint32) uint32 {
return op | (i&0xFF)<<10 | (r2&0x1F)<<5 | (r3&0x1F)<<0 return op | (i&0xFF)<<10 | (r2&0x1F)<<5 | (r3&0x1F)<<0
} }
@ -2079,7 +2091,14 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
if r == 0 { if r == 0 {
r = int(p.To.Reg) r = int(p.To.Reg)
} }
o1 = OP_12IRR(c.opirr(p.As), uint32(v), uint32(r), uint32(p.To.Reg)) if p.As == AADDV16 {
if v&65535 != 0 {
c.ctxt.Diag("%v: the constant must be a multiple of 65536.\n", p)
}
o1 = OP_16IRR(c.opirr(p.As), uint32(v>>16), uint32(r), uint32(p.To.Reg))
} else {
o1 = OP_12IRR(c.opirr(p.As), uint32(v), uint32(r), uint32(p.To.Reg))
}
case 5: // syscall case 5: // syscall
v := c.regoff(&p.From) v := c.regoff(&p.From)
@ -2535,7 +2554,28 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
si := c.regoff(&p.From) si := c.regoff(&p.From)
Rj := uint32(p.From.Reg & EXT_REG_MASK) Rj := uint32(p.From.Reg & EXT_REG_MASK)
Vd := uint32(p.To.Reg & EXT_REG_MASK) Vd := uint32(p.To.Reg & EXT_REG_MASK)
o1 = v | uint32(si<<10) | (Rj << 5) | Vd switch v & 0xc00000 {
case 0x800000: // [x]vldrepl.b
o1 = OP_12IRR(v, uint32(si), Rj, Vd)
case 0x400000: // [x]vldrepl.h
if si&1 != 0 {
c.ctxt.Diag("%v: offset must be a multiple of 2.\n", p)
}
o1 = OP_11IRR(v, uint32(si>>1), Rj, Vd)
case 0x0:
switch v & 0x300000 {
case 0x200000: // [x]vldrepl.w
if si&3 != 0 {
c.ctxt.Diag("%v: offset must be a multiple of 4.\n", p)
}
o1 = OP_10IRR(v, uint32(si>>2), Rj, Vd)
case 0x100000: // [x]vldrepl.d
if si&7 != 0 {
c.ctxt.Diag("%v: offset must be a multiple of 8.\n", p)
}
o1 = OP_9IRR(v, uint32(si>>3), Rj, Vd)
}
}
case 47: // preld offset(Rbase), $hint case 47: // preld offset(Rbase), $hint
offs := c.regoff(&p.From) offs := c.regoff(&p.From)
@ -4004,12 +4044,12 @@ func (c *ctxt0) opirr(a obj.As) uint32 {
return 0x00b << 22 return 0x00b << 22
case AADDVU: case AADDVU:
return 0x00b << 22 return 0x00b << 22
case AADDV16:
return 0x4 << 26
case AJMP: case AJMP:
return 0x14 << 26 return 0x14 << 26
case AJAL, case AJAL:
obj.ADUFFZERO,
obj.ADUFFCOPY:
return 0x15 << 26 return 0x15 << 26
case AJIRL: case AJIRL:

View file

@ -220,6 +220,15 @@ Note: In the following sections 3.1 to 3.6, "ui4" (4-bit unsigned int immediate)
XVMOVQ offset(Rj), Xd.W8 | xvldrepl.w Xd, Rj, si10 | for i in range(8) : XR[xd].w[i] = load 32 bit memory data from (GR[rj]+SignExtend(si10<<2)) XVMOVQ offset(Rj), Xd.W8 | xvldrepl.w Xd, Rj, si10 | for i in range(8) : XR[xd].w[i] = load 32 bit memory data from (GR[rj]+SignExtend(si10<<2))
XVMOVQ offset(Rj), Xd.V4 | xvldrepl.d Xd, Rj, si9 | for i in range(4) : XR[xd].d[i] = load 64 bit memory data from (GR[rj]+SignExtend(si9<<3)) XVMOVQ offset(Rj), Xd.V4 | xvldrepl.d Xd, Rj, si9 | for i in range(4) : XR[xd].d[i] = load 64 bit memory data from (GR[rj]+SignExtend(si9<<3))
note: In Go assembly, for ease of understanding, the offset represents the actual address offset.
During platform encoding, however, the offset is shifted right to increase the encodable offset range, as follows:
Go assembly | platform assembly
VMOVQ 1(R4), V5.B16 | vldrepl.b v5, r4, $1
VMOVQ 2(R4), V5.H8 | vldrepl.h v5, r4, $1
VMOVQ 8(R4), V5.W4 | vldrepl.w v5, r4, $2
VMOVQ 8(R4), V5.V2 | vldrepl.d v5, r4, $1
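A minimal sketch of that scaling rule, with vldreplImm as a hypothetical helper (not part of the assembler) and fmt for the error:

// vldreplImm converts a Go-assembly byte offset into the immediate
// encoded by [x]vldrepl.{b,h,w,d}; elemSize is 1, 2, 4 or 8.
func vldreplImm(offset, elemSize int64) (int64, error) {
	if offset%elemSize != 0 {
		return 0, fmt.Errorf("offset %#x must be a multiple of %d", offset, elemSize)
	}
	return offset / elemSize, nil
}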
# Special instruction encoding definition and description on LoongArch # Special instruction encoding definition and description on LoongArch
1. DBAR hint encoding for LA664(Loongson 3A6000) and later micro-architectures, paraphrased 1. DBAR hint encoding for LA664(Loongson 3A6000) and later micro-architectures, paraphrased
@ -317,6 +326,18 @@ Note: In the following sections 3.1 to 3.6, "ui4" (4-bit unsigned int immediate)
Go assembly | platform assembly Go assembly | platform assembly
MOVWP 8(R4), R5 | ldptr.w r5, r4, $2 MOVWP 8(R4), R5 | ldptr.w r5, r4, $2
6. Note on the special add instruction
Mapping between Go and platform assembly:
Go assembly | platform assembly
ADDV16 si16<<16, Rj, Rd | addu16i.d rd, rj, si16
note: si16 is a 16-bit immediate number, and si16<<16 is the actual operand.
The addu16i.d instruction logically left-shifts the 16-bit immediate number si16 by 16 bits, then
sign-extends it. The resulting data is added to the [63:0] bits of data in the general-purpose register
rj, and the sum is written into the general-purpose register rd.
The addu16i.d instruction is used in conjunction with the ldptr.w/d and stptr.w/d instructions to
accelerate access based on the GOT table in position-independent code.
*/ */
package loong64 package loong64
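The addu16i.d semantics described in note 6 reduce to one line of arithmetic; a behavioral model (addu16i is a hypothetical function, not assembler code):

// addu16i models addu16i.d: rd = rj + SignExtend(si16 << 16).
// Converting si16 to int64 before shifting performs the sign
// extension, so the shift never discards the sign bit.
func addu16i(rj int64, si16 int16) int64 {
	return rj + int64(si16)<<16
}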

View file

@ -17,11 +17,7 @@ import (
func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
// Rewrite JMP/JAL to symbol as TYPE_BRANCH. // Rewrite JMP/JAL to symbol as TYPE_BRANCH.
switch p.As { switch p.As {
case AJMP, case AJMP, AJAL, ARET:
AJAL,
ARET,
obj.ADUFFZERO,
obj.ADUFFCOPY:
if p.To.Sym != nil { if p.To.Sym != nil {
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
} }
@ -93,40 +89,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
} }
func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
// ADUFFxxx $offset
// becomes
// MOVV runtime.duffxxx@GOT, REGTMP
// ADD $offset, REGTMP
// JAL REGTMP
if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO {
var sym *obj.LSym
if p.As == obj.ADUFFZERO {
sym = ctxt.LookupABI("runtime.duffzero", obj.ABIInternal)
} else {
sym = ctxt.LookupABI("runtime.duffcopy", obj.ABIInternal)
}
offset := p.To.Offset
p.As = AMOVV
p.From.Type = obj.TYPE_MEM
p.From.Sym = sym
p.From.Name = obj.NAME_GOTREF
p.To.Type = obj.TYPE_REG
p.To.Reg = REGTMP
p.To.Name = obj.NAME_NONE
p.To.Offset = 0
p.To.Sym = nil
p1 := obj.Appendp(p, newprog)
p1.As = AADDV
p1.From.Type = obj.TYPE_CONST
p1.From.Offset = offset
p1.To.Type = obj.TYPE_REG
p1.To.Reg = REGTMP
p2 := obj.Appendp(p1, newprog)
p2.As = AJAL
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = REGTMP
}
// We only care about global data: NAME_EXTERN means a global // We only care about global data: NAME_EXTERN means a global
// symbol in the Go sense, and p.Sym.Local is true for a few // symbol in the Go sense, and p.Sym.Local is true for a few
// internally defined symbols. // internally defined symbols.
@ -256,9 +218,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
} }
} }
case AJAL, case AJAL:
obj.ADUFFZERO,
obj.ADUFFCOPY:
c.cursym.Func().Text.Mark &^= LEAF c.cursym.Func().Text.Mark &^= LEAF
fallthrough fallthrough

View file

@ -37,7 +37,7 @@ func buildop(ctxt *obj.Link) {}
func jalToSym(ctxt *obj.Link, p *obj.Prog, lr int16) { func jalToSym(ctxt *obj.Link, p *obj.Prog, lr int16) {
switch p.As { switch p.As {
case obj.ACALL, obj.AJMP, obj.ARET, obj.ADUFFZERO, obj.ADUFFCOPY: case obj.ACALL, obj.AJMP, obj.ARET:
default: default:
ctxt.Diag("unexpected Prog in jalToSym: %v", p) ctxt.Diag("unexpected Prog in jalToSym: %v", p)
return return
@ -162,42 +162,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
// Rewrite p, if necessary, to access global data via the global offset table. // Rewrite p, if necessary, to access global data via the global offset table.
func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO {
// ADUFFxxx $offset
// becomes
// MOV runtime.duffxxx@GOT, REG_TMP
// ADD $offset, REG_TMP
// CALL REG_TMP
var sym *obj.LSym
if p.As == obj.ADUFFCOPY {
sym = ctxt.LookupABI("runtime.duffcopy", obj.ABIInternal)
} else {
sym = ctxt.LookupABI("runtime.duffzero", obj.ABIInternal)
}
offset := p.To.Offset
p.As = AMOV
p.From.Type = obj.TYPE_MEM
p.From.Name = obj.NAME_GOTREF
p.From.Sym = sym
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_TMP
p.To.Name = obj.NAME_NONE
p.To.Offset = 0
p.To.Sym = nil
p1 := obj.Appendp(p, newprog)
p1.As = AADD
p1.From.Type = obj.TYPE_CONST
p1.From.Offset = offset
p1.To.Type = obj.TYPE_REG
p1.To.Reg = REG_TMP
p2 := obj.Appendp(p1, newprog)
p2.As = obj.ACALL
p2.To.Type = obj.TYPE_REG
p2.To.Reg = REG_TMP
}
// We only care about global data: NAME_EXTERN means a global // We only care about global data: NAME_EXTERN means a global
// symbol in the Go sense and p.Sym.Local is true for a few internally // symbol in the Go sense and p.Sym.Local is true for a few internally
// defined symbols. // defined symbols.
@ -407,7 +371,7 @@ func containsCall(sym *obj.LSym) bool {
// CALLs are CALL or JAL(R) with link register LR. // CALLs are CALL or JAL(R) with link register LR.
for p := sym.Func().Text; p != nil; p = p.Link { for p := sym.Func().Text; p != nil; p = p.Link {
switch p.As { switch p.As {
case obj.ACALL, obj.ADUFFZERO, obj.ADUFFCOPY: case obj.ACALL:
return true return true
case AJAL, AJALR: case AJAL, AJALR:
if p.From.Type == obj.TYPE_REG && p.From.Reg == REG_LR { if p.From.Type == obj.TYPE_REG && p.From.Reg == REG_LR {
@ -586,7 +550,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
p.From.Reg = REG_SP p.From.Reg = REG_SP
} }
case obj.ACALL, obj.ADUFFZERO, obj.ADUFFCOPY: case obj.ACALL:
switch p.To.Type { switch p.To.Type {
case obj.TYPE_MEM: case obj.TYPE_MEM:
jalToSym(ctxt, p, REG_LR) jalToSym(ctxt, p, REG_LR)
@ -2634,8 +2598,6 @@ var instructions = [ALAST & obj.AMask]instructionData{
obj.APCDATA: {enc: pseudoOpEncoding}, obj.APCDATA: {enc: pseudoOpEncoding},
obj.ATEXT: {enc: pseudoOpEncoding}, obj.ATEXT: {enc: pseudoOpEncoding},
obj.ANOP: {enc: pseudoOpEncoding}, obj.ANOP: {enc: pseudoOpEncoding},
obj.ADUFFZERO: {enc: pseudoOpEncoding},
obj.ADUFFCOPY: {enc: pseudoOpEncoding},
obj.APCALIGN: {enc: pseudoOpEncoding}, obj.APCALIGN: {enc: pseudoOpEncoding},
} }

View file

@ -59,6 +59,12 @@ package main
func main() {} func main() {}
` `
var goSourceWithData = `
package main
var globalVar = 42
func main() { println(&globalVar) }
`
// The linker used to crash if an ELF input file had multiple text sections // The linker used to crash if an ELF input file had multiple text sections
// with the same name. // with the same name.
func TestSectionsWithSameName(t *testing.T) { func TestSectionsWithSameName(t *testing.T) {
@ -569,3 +575,106 @@ func TestFlagR(t *testing.T) {
t.Errorf("executable failed to run: %v\n%s", err, out) t.Errorf("executable failed to run: %v\n%s", err, out)
} }
} }
func TestFlagD(t *testing.T) {
// Test that using the -D flag to specify data section address generates
// a working binary with data at the specified address.
t.Parallel()
testFlagD(t, "0x10000000", "", 0x10000000)
}
func TestFlagDUnaligned(t *testing.T) {
// Test that using the -D flag with an unaligned address errors out
t.Parallel()
testFlagDError(t, "0x10000123", "", "invalid -D value 0x10000123")
}
func TestFlagDWithR(t *testing.T) {
// Test that using the -D flag with -R flag errors on unaligned address.
t.Parallel()
testFlagDError(t, "0x30001234", "8192", "invalid -D value 0x30001234")
}
func testFlagD(t *testing.T, dataAddr string, roundQuantum string, expectedAddr uint64) {
testenv.MustHaveGoBuild(t)
tmpdir := t.TempDir()
src := filepath.Join(tmpdir, "x.go")
if err := os.WriteFile(src, []byte(goSourceWithData), 0444); err != nil {
t.Fatal(err)
}
exe := filepath.Join(tmpdir, "x.exe")
// Build linker flags
ldflags := "-D=" + dataAddr
if roundQuantum != "" {
ldflags += " -R=" + roundQuantum
}
cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-ldflags="+ldflags, "-o", exe, src)
if out, err := cmd.CombinedOutput(); err != nil {
t.Fatalf("build failed: %v, output:\n%s", err, out)
}
cmd = testenv.Command(t, exe)
if out, err := cmd.CombinedOutput(); err != nil {
t.Errorf("executable failed to run: %v\n%s", err, out)
}
ef, err := elf.Open(exe)
if err != nil {
t.Fatalf("open elf file failed: %v", err)
}
defer ef.Close()
// Find the first data-related section to verify segment placement
var firstDataSectionAddr uint64
var found bool
for _, sec := range ef.Sections {
if sec.Type == elf.SHT_PROGBITS || sec.Type == elf.SHT_NOBITS {
// These sections are writable, allocated at runtime, but not executable
isWrite := sec.Flags&elf.SHF_WRITE != 0
isExec := sec.Flags&elf.SHF_EXECINSTR != 0
isAlloc := sec.Flags&elf.SHF_ALLOC != 0
if isWrite && !isExec && isAlloc {
addrLower := sec.Addr < firstDataSectionAddr
if !found || addrLower {
firstDataSectionAddr = sec.Addr
found = true
}
}
}
}
if !found {
t.Fatalf("can't find any writable data sections")
}
if firstDataSectionAddr != expectedAddr {
t.Errorf("data section starts at 0x%x, expected 0x%x", firstDataSectionAddr, expectedAddr)
}
}
func testFlagDError(t *testing.T, dataAddr string, roundQuantum string, expectedError string) {
testenv.MustHaveGoBuild(t)
tmpdir := t.TempDir()
src := filepath.Join(tmpdir, "x.go")
if err := os.WriteFile(src, []byte(goSourceWithData), 0444); err != nil {
t.Fatal(err)
}
exe := filepath.Join(tmpdir, "x.exe")
// Build linker flags
ldflags := "-D=" + dataAddr
if roundQuantum != "" {
ldflags += " -R=" + roundQuantum
}
cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-ldflags="+ldflags, "-o", exe, src)
out, err := cmd.CombinedOutput()
if err == nil {
t.Fatalf("expected build to fail with unaligned data address, but it succeeded")
}
if !strings.Contains(string(out), expectedError) {
t.Errorf("expected error message to contain %q, got:\n%s", expectedError, out)
}
}

View file

@ -2881,7 +2881,12 @@ func (ctxt *Link) address() []*sym.Segment {
} }
order = append(order, &Segdata) order = append(order, &Segdata)
Segdata.Rwx = 06 Segdata.Rwx = 06
Segdata.Vaddr = va if *FlagDataAddr != -1 {
Segdata.Vaddr = uint64(*FlagDataAddr)
va = Segdata.Vaddr
} else {
Segdata.Vaddr = va
}
var data *sym.Section var data *sym.Section
var noptr *sym.Section var noptr *sym.Section
var bss *sym.Section var bss *sym.Section

View file

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build darwin || (freebsd && go1.21) || linux || (netbsd && go1.25) //go:build darwin || freebsd || linux || (netbsd && go1.25)
package ld package ld

View file

@ -442,3 +442,25 @@ func d()
t.Errorf("Trampoline b-tramp0 exists unnecessarily") t.Errorf("Trampoline b-tramp0 exists unnecessarily")
} }
} }
func TestRounding(t *testing.T) {
testCases := []struct {
input int64
quantum int64
expected int64
}{
{0x30000000, 0x2000, 0x30000000}, // Already aligned
{0x30002000, 0x2000, 0x30002000}, // Exactly on boundary
{0x30001234, 0x2000, 0x30002000},
{0x30001000, 0x2000, 0x30002000},
{0x30001fff, 0x2000, 0x30002000},
}
for _, tc := range testCases {
result := Rnd(tc.input, tc.quantum)
if result != tc.expected {
t.Errorf("Rnd(0x%x, 0x%x) = 0x%x, expected 0x%x",
tc.input, tc.quantum, result, tc.expected)
}
}
}
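For context, the helper under test rounds its first argument up to the next multiple of the quantum; a behavioral model consistent with the table above (the real Rnd lives in cmd/link/internal/ld, so this is a sketch, not the source):

// Rnd rounds v up to a multiple of r; a non-positive r leaves v unchanged.
func Rnd(v, r int64) int64 {
	if r <= 0 {
		return v
	}
	v += r - 1
	return v - v%r
}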

View file

@ -105,6 +105,7 @@ var (
FlagStrictDups = flag.Int("strictdups", 0, "sanity check duplicate symbol contents during object file reading (1=warn 2=err).") FlagStrictDups = flag.Int("strictdups", 0, "sanity check duplicate symbol contents during object file reading (1=warn 2=err).")
FlagRound = flag.Int64("R", -1, "set address rounding `quantum`") FlagRound = flag.Int64("R", -1, "set address rounding `quantum`")
FlagTextAddr = flag.Int64("T", -1, "set the start address of text symbols") FlagTextAddr = flag.Int64("T", -1, "set the start address of text symbols")
FlagDataAddr = flag.Int64("D", -1, "set the start address of data symbols")
FlagFuncAlign = flag.Int("funcalign", 0, "set function align to `N` bytes") FlagFuncAlign = flag.Int("funcalign", 0, "set function align to `N` bytes")
flagEntrySymbol = flag.String("E", "", "set `entry` symbol name") flagEntrySymbol = flag.String("E", "", "set `entry` symbol name")
flagPruneWeakMap = flag.Bool("pruneweakmap", true, "prune weak mapinit refs") flagPruneWeakMap = flag.Bool("pruneweakmap", true, "prune weak mapinit refs")
@ -317,6 +318,10 @@ func Main(arch *sys.Arch, theArch Arch) {
bench.Start("Archinit") bench.Start("Archinit")
thearch.Archinit(ctxt) thearch.Archinit(ctxt)
if *FlagDataAddr != -1 && *FlagDataAddr%*FlagRound != 0 {
Exitf("invalid -D value 0x%x: not aligned to rounding quantum 0x%x", *FlagDataAddr, *FlagRound)
}
if ctxt.linkShared && !ctxt.IsELF { if ctxt.linkShared && !ctxt.IsELF {
Exitf("-linkshared can only be used on elf systems") Exitf("-linkshared can only be used on elf systems")
} }
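With the flag wired up, it is exercised through -ldflags like -T and -R; a hypothetical invocation (addresses and file names are illustrative):

go build -ldflags='-D=0x10000000 -R=0x2000' -o x.exe x.go

Per the check above, the -D address must be a multiple of the rounding quantum, or the link fails with an "invalid -D value" error.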

View file

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build (freebsd && go1.21) || (netbsd && go1.25) //go:build freebsd || (netbsd && go1.25)
package ld package ld

View file

@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !darwin && !(freebsd && go1.21) && !linux && !(netbsd && go1.25)
+//go:build !darwin && !freebsd && !linux && !(netbsd && go1.25)
 
 package ld


@@ -153,10 +153,6 @@ func makeComputePprofFunc(state trace.GoState, trackReason func(string) bool) co
 			if ev.Kind() != trace.EventStateTransition {
 				continue
 			}
-			stack := ev.Stack()
-			if stack == trace.NoStack {
-				continue
-			}
 
 			// The state transition has to apply to a goroutine.
 			st := ev.StateTransition()

src/cmd/trace/pprof_test.go (new file)

@@ -0,0 +1,103 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"net/http"
"os"
"runtime/trace"
"strings"
"testing"
"testing/synctest"
"time"
"internal/trace/testtrace"
)
// Regression test for go.dev/issue/74850.
func TestSyscallProfile74850(t *testing.T) {
testtrace.MustHaveSyscallEvents(t)
var buf bytes.Buffer
err := trace.Start(&buf)
if err != nil {
t.Fatalf("start tracing: %v", err)
}
synctest.Test(t, func(t *testing.T) {
go hidden1(t)
go hidden2(t)
go visible(t)
synctest.Wait()
time.Sleep(1 * time.Millisecond)
synctest.Wait()
})
trace.Stop()
if t.Failed() {
return
}
parsed, err := parseTrace(&buf, int64(buf.Len()))
if err != nil {
t.Fatalf("parsing trace: %v", err)
}
records, err := pprofByGoroutine(computePprofSyscall(), parsed)(&http.Request{})
if err != nil {
t.Fatalf("failed to generate pprof: %v\n", err)
}
for _, r := range records {
t.Logf("Record: n=%d, total=%v", r.Count, r.Time)
for _, f := range r.Stack {
t.Logf("\t%s", f.Func)
t.Logf("\t\t%s:%d @ 0x%x", f.File, f.Line, f.PC)
}
}
if len(records) == 0 {
t.Error("empty profile")
}
// Make sure we see the right frames.
wantSymbols := []string{"cmd/trace.visible", "cmd/trace.hidden1", "cmd/trace.hidden2"}
haveSymbols := make([]bool, len(wantSymbols))
for _, r := range records {
for _, f := range r.Stack {
for i, s := range wantSymbols {
if strings.Contains(f.Func, s) {
haveSymbols[i] = true
}
}
}
}
for i, have := range haveSymbols {
if !have {
t.Errorf("expected %s in syscall profile", wantSymbols[i])
}
}
}
func stat(t *testing.T) {
_, err := os.Stat(".")
if err != nil {
t.Errorf("os.Stat: %v", err)
}
}
func hidden1(t *testing.T) {
stat(t)
}
func hidden2(t *testing.T) {
stat(t)
stat(t)
}
func visible(t *testing.T) {
stat(t)
time.Sleep(1 * time.Millisecond)
}


@@ -0,0 +1,284 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package analysisflags
// This file defines the -fix logic common to unitchecker and
// {single,multi}checker.
import (
"fmt"
"go/format"
"go/token"
"log"
"maps"
"os"
"sort"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/internal/analysisinternal"
"golang.org/x/tools/internal/diff"
)
// FixAction abstracts a checker action (running one analyzer on one
// package) for the purposes of applying its diagnostics' fixes.
type FixAction struct {
Name string // e.g. "analyzer@package"
FileSet *token.FileSet
ReadFileFunc analysisinternal.ReadFileFunc
Diagnostics []analysis.Diagnostic
}
// ApplyFixes attempts to apply the first suggested fix associated
// with each diagnostic reported by the specified actions.
// All fixes must have been validated by [analysisinternal.ValidateFixes].
//
// Each fix is treated as an independent change; fixes are merged in
// an arbitrary deterministic order as if by a three-way diff tool
// such as the UNIX diff3 command or 'git merge'. Any fix that cannot be
// cleanly merged is discarded, in which case the final summary tells
// the user to re-run the tool.
// TODO(adonovan): make the checker tool re-run the analysis itself.
//
// When the same file is analyzed as a member of both a primary
// package "p" and a test-augmented package "p [p.test]", there may be
// duplicate diagnostics and fixes. One set of fixes will be applied
// and the other will be discarded; but re-running the tool may then
// show zero fixes, which may cause the confused user to wonder what
// happened to the other ones.
// TODO(adonovan): consider pre-filtering completely identical fixes.
//
// A common reason for overlapping fixes is duplicate additions of the
// same import. The merge algorithm may often cleanly resolve such
// fixes, coalescing identical edits, but the merge may sometimes be
// confused by nearby changes.
//
// Even when merging succeeds, there is no guarantee that the
// composition of the two fixes is semantically correct. Coalescing
// identical edits is appropriate for imports, but not for, say,
// increments to a counter variable; the correct resolution in that
// case might be to increment it twice. Or consider two fixes that
// each delete the penultimate reference to an import or local
// variable: each fix is sound individually, and they may be textually
// distant from each other, but when both are applied, the program is
// no longer valid because it has an unreferenced import or local
// variable.
// TODO(adonovan): investigate replacing the final "gofmt" step with a
// formatter that applies the unused-import deletion logic of
// "goimports".
//
// Merging depends on both the order of fixes and the order of edits
// within them. For example, if three fixes add import "a" twice and
// import "b" once, the two imports of "a" may be combined if they
// appear in order [a, a, b], or not if they appear as [a, b, a].
// TODO(adonovan): investigate an algebraic approach to imports;
// that is, for fixes to Go source files, convert changes within the
// import(...) portion of the file into semantic edits, compose those
// edits algebraically, then convert the result back to edits.
//
// ApplyFixes returns success if all fixes are valid, could be cleanly
// merged, and the corresponding files were successfully updated.
//
// If the -diff flag is set, instead of updating the files it displays the final
// patch composed of all the cleanly merged fixes.
//
// TODO(adonovan): handle file-system level aliases such as symbolic
// links using robustio.FileID.
func ApplyFixes(actions []FixAction, verbose bool) error {
// Select fixes to apply.
//
// If there are several for a given Diagnostic, choose the first.
// Preserve the order of iteration, for determinism.
type fixact struct {
fix *analysis.SuggestedFix
act FixAction
}
var fixes []*fixact
for _, act := range actions {
for _, diag := range act.Diagnostics {
for i := range diag.SuggestedFixes {
fix := &diag.SuggestedFixes[i]
if i == 0 {
fixes = append(fixes, &fixact{fix, act})
} else {
// TODO(adonovan): abstract the logger.
log.Printf("%s: ignoring alternative fix %q", act.Name, fix.Message)
}
}
}
}
// Read file content on demand, from the virtual
// file system that fed the analyzer (see #62292).
//
// This cache assumes that all successful reads for the same
// file name return the same content.
// (It is tempting to group fixes by package and do the
// merge/apply/format steps one package at a time, but
// packages are not disjoint, due to test variants, so this
// would not really address the issue.)
baselineContent := make(map[string][]byte)
getBaseline := func(readFile analysisinternal.ReadFileFunc, filename string) ([]byte, error) {
content, ok := baselineContent[filename]
if !ok {
var err error
content, err = readFile(filename)
if err != nil {
return nil, err
}
baselineContent[filename] = content
}
return content, nil
}
// Apply each fix, updating the current state
// only if the entire fix can be cleanly merged.
accumulatedEdits := make(map[string][]diff.Edit)
goodFixes := 0
fixloop:
for _, fixact := range fixes {
// Convert analysis.TextEdits to diff.Edits, grouped by file.
// Precondition: a prior call to validateFix succeeded.
fileEdits := make(map[string][]diff.Edit)
for _, edit := range fixact.fix.TextEdits {
file := fixact.act.FileSet.File(edit.Pos)
baseline, err := getBaseline(fixact.act.ReadFileFunc, file.Name())
if err != nil {
log.Printf("skipping fix to file %s: %v", file.Name(), err)
continue fixloop
}
// We choose to treat size mismatch as a serious error,
// as it indicates a concurrent write to at least one file,
// and possibly others (consider a git checkout, for example).
if file.Size() != len(baseline) {
return fmt.Errorf("concurrent file modification detected in file %s (size changed from %d -> %d bytes); aborting fix",
file.Name(), file.Size(), len(baseline))
}
fileEdits[file.Name()] = append(fileEdits[file.Name()], diff.Edit{
Start: file.Offset(edit.Pos),
End: file.Offset(edit.End),
New: string(edit.NewText),
})
}
// Apply each set of edits by merging atop
// the previous accumulated state.
after := make(map[string][]diff.Edit)
for file, edits := range fileEdits {
if prev := accumulatedEdits[file]; len(prev) > 0 {
merged, ok := diff.Merge(prev, edits)
if !ok {
// debugging
if false {
log.Printf("%s: fix %s conflicts", fixact.act.Name, fixact.fix.Message)
}
continue fixloop // conflict
}
edits = merged
}
after[file] = edits
}
// The entire fix applied cleanly; commit it.
goodFixes++
maps.Copy(accumulatedEdits, after)
// debugging
if false {
log.Printf("%s: fix %s applied", fixact.act.Name, fixact.fix.Message)
}
}
badFixes := len(fixes) - goodFixes
// Show diff or update files to final state.
var files []string
for file := range accumulatedEdits {
files = append(files, file)
}
sort.Strings(files) // for deterministic -diff
var filesUpdated, totalFiles int
for _, file := range files {
edits := accumulatedEdits[file]
if len(edits) == 0 {
continue // the diffs annihilated (a miracle?)
}
// Apply accumulated fixes.
baseline := baselineContent[file] // (cache hit)
final, err := diff.ApplyBytes(baseline, edits)
if err != nil {
log.Fatalf("internal error in diff.ApplyBytes: %v", err)
}
// Attempt to format each file.
if formatted, err := format.Source(final); err == nil {
final = formatted
}
if diffFlag {
// Since we formatted the file, we need to recompute the diff.
unified := diff.Unified(file+" (old)", file+" (new)", string(baseline), string(final))
// TODO(adonovan): abstract the I/O.
os.Stdout.WriteString(unified)
} else {
// write
totalFiles++
// TODO(adonovan): abstract the I/O.
if err := os.WriteFile(file, final, 0644); err != nil {
log.Println(err)
continue
}
filesUpdated++
}
}
// TODO(adonovan): consider returning a structured result that
// maps each SuggestedFix to its status:
// - invalid
// - secondary, not selected
// - applied
// - had conflicts.
// and a mapping from each affected file to:
// - its final/original content pair, and
// - whether formatting was successful.
// Then file writes and the UI can be applied by the caller
// in whatever form they like.
// If victory was incomplete, report an error that indicates partial progress.
//
// badFixes > 0 indicates that we decided not to attempt some
// fixes due to conflicts or failure to read the source; still
// it's a relatively benign situation since the user can
// re-run the tool, and we may still make progress.
//
// filesUpdated < totalFiles indicates that some file updates
// failed. This should be rare, but is a serious error as it
// may apply half a fix, or leave the files in a bad state.
//
// These numbers are potentially misleading:
// The denominator includes duplicate conflicting fixes due to
// common files in packages "p" and "p [p.test]", which may
// have been fixed and won't appear in the re-run.
// TODO(adonovan): eliminate identical fixes as an initial
// filtering step.
//
// TODO(adonovan): should we log that n files were updated in case of total victory?
if badFixes > 0 || filesUpdated < totalFiles {
if diffFlag {
return fmt.Errorf("%d of %d fixes skipped (e.g. due to conflicts)", badFixes, len(fixes))
} else {
return fmt.Errorf("applied %d of %d fixes; %d files updated. (Re-run the command to apply more.)",
goodFixes, len(fixes), filesUpdated)
}
}
if verbose {
log.Printf("applied %d fixes, updated %d files", len(fixes), filesUpdated)
}
return nil
}
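// Editor's note: a hypothetical caller of ApplyFixes (not part of the
// vendored source; analysisflags is internal to x/tools, so this is
// illustrative only). fset and diags stand in for state from a real
// checker run:
//
//	acts := []analysisflags.FixAction{{
//		Name:         "example-analyzer", // hypothetical action name
//		FileSet:      fset,
//		ReadFileFunc: os.ReadFile,
//		Diagnostics:  diags,
//	}}
//	if err := analysisflags.ApplyFixes(acts, false); err != nil {
//		log.Print(err) // some fixes were skipped or file writes failed
//	}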


@@ -2,8 +2,9 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Package analysisflags defines helpers for processing flags of
-// analysis driver tools.
+// Package analysisflags defines helpers for processing flags (-help,
+// -json, -fix, -diff, etc) common to unitchecker and
+// {single,multi}checker. It is not intended for broader use.
 package analysisflags
 
 import (
@@ -24,8 +25,10 @@ import (
 // flags common to all {single,multi,unit}checkers.
 var (
 	JSON    = false // -json
 	Context = -1    // -c=N: if N>0, display offending line plus N lines of context
+	Fix      bool   // -fix
+	diffFlag bool   // -diff (changes [ApplyFixes] behavior)
 )
 
 // Parse creates a flag for each of the analyzer's flags,
@@ -74,6 +77,8 @@ func Parse(analyzers []*analysis.Analyzer, multi bool) []*analysis.Analyzer {
 	// flags common to all checkers
 	flag.BoolVar(&JSON, "json", JSON, "emit JSON output")
 	flag.IntVar(&Context, "c", Context, `display offending line with this many lines of context`)
+	flag.BoolVar(&Fix, "fix", false, "apply all suggested fixes")
+	flag.BoolVar(&diffFlag, "diff", false, "with -fix, don't update the files, but print a unified diff")
 
 	// Add shims for legacy vet flags to enable existing
 	// scripts that run vet to continue to work.

@@ -85,6 +85,18 @@ type Config struct {
 //	-V=full         describe executable for build caching
 //	foo.cfg         perform separate modular analyze on the single
 //	                unit described by a JSON config file foo.cfg.
+//
+// Also, subject to approval of proposal #71859:
+//
+//	-fix            don't print each diagnostic, apply its first fix
+//	-diff           don't apply a fix, print the diff (requires -fix)
+//
+// Additionally, the environment variable GOVET has the value "vet" or
+// "fix" depending on whether the command is being invoked by "go vet",
+// to report diagnostics, or "go fix", to apply fixes. This is
+// necessary so that callers of Main can select their analyzer suite
+// before flag parsing. (Vet analyzers must report real code problems,
+// whereas Fix analyzers may fix non-problems such as style issues.)
 func Main(analyzers ...*analysis.Analyzer) {
 	progname := filepath.Base(os.Args[0])
 	log.SetFlags(0)
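The GOVET note above implies that a single driver binary selects its analyzer suite before flag parsing. A sketch of such selection (vetSuite and fixSuite are hypothetical analyzer lists, not part of this change):

	suite := vetSuite
	if os.Getenv("GOVET") == "fix" {
		suite = append(suite, fixSuite...)
	}
	unitchecker.Main(suite...)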
@@ -136,35 +148,14 @@ func Run(configFile string, analyzers []*analysis.Analyzer) {
 		log.Fatal(err)
 	}
 
+	code := 0
+
 	// In VetxOnly mode, the analysis is run only for facts.
 	if !cfg.VetxOnly {
-		if analysisflags.JSON {
-			// JSON output
-			tree := make(analysisflags.JSONTree)
-			for _, res := range results {
-				tree.Add(fset, cfg.ID, res.a.Name, res.diagnostics, res.err)
-			}
-			tree.Print(os.Stdout)
-		} else {
-			// plain text
-			exit := 0
-			for _, res := range results {
-				if res.err != nil {
-					log.Println(res.err)
-					exit = 1
-				}
-			}
-			for _, res := range results {
-				for _, diag := range res.diagnostics {
-					analysisflags.PrintPlain(os.Stderr, fset, analysisflags.Context, diag)
-					exit = 1
-				}
-			}
-			os.Exit(exit)
-		}
+		code = processResults(fset, cfg.ID, results)
 	}
-	os.Exit(0)
+	os.Exit(code)
 }
 
 func readConfig(filename string) (*Config, error) {
@@ -185,6 +176,63 @@
 	return cfg, nil
 }
 
+func processResults(fset *token.FileSet, id string, results []result) (exit int) {
+	if analysisflags.Fix {
+		// Don't print the diagnostics,
+		// but apply all fixes from the root actions.
+		// Convert results to form needed by ApplyFixes.
+		fixActions := make([]analysisflags.FixAction, len(results))
+		for i, res := range results {
+			fixActions[i] = analysisflags.FixAction{
+				Name:         res.a.Name,
+				FileSet:      fset,
+				ReadFileFunc: os.ReadFile,
+				Diagnostics:  res.diagnostics,
+			}
+		}
+		if err := analysisflags.ApplyFixes(fixActions, false); err != nil {
+			// Fail when applying fixes failed.
+			log.Print(err)
+			exit = 1
+		}
+		// Don't proceed to print text/JSON,
+		// and don't report an error
+		// just because there were diagnostics.
+		return
+	}
+
+	// Keep consistent with analogous logic in
+	// printDiagnostics in ../internal/checker/checker.go.
+	if analysisflags.JSON {
+		// JSON output
+		tree := make(analysisflags.JSONTree)
+		for _, res := range results {
+			tree.Add(fset, id, res.a.Name, res.diagnostics, res.err)
+		}
+		tree.Print(os.Stdout) // ignore error
+	} else {
+		// plain text
+		for _, res := range results {
+			if res.err != nil {
+				log.Println(res.err)
+				exit = 1
+			}
+		}
+		for _, res := range results {
+			for _, diag := range res.diagnostics {
+				analysisflags.PrintPlain(os.Stderr, fset, analysisflags.Context, diag)
+				exit = 1
+			}
+		}
+	}
+	return
+}
+
 type factImporter = func(pkgPath string) ([]byte, error)
// These four hook variables are a proof of concept of a future // These four hook variables are a proof of concept of a future

src/cmd/vendor/golang.org/x/tools/internal/diff/diff.go (new vendored file)

@@ -0,0 +1,177 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package diff computes differences between text files or strings.
package diff
import (
"fmt"
"slices"
"sort"
"strings"
)
// An Edit describes the replacement of a portion of a text file.
type Edit struct {
Start, End int // byte offsets of the region to replace
New string // the replacement
}
func (e Edit) String() string {
return fmt.Sprintf("{Start:%d,End:%d,New:%q}", e.Start, e.End, e.New)
}
// Apply applies a sequence of edits to the src buffer and returns the
// result. Edits are applied in order of start offset; edits with the
// same start offset are applied in the order they were provided.
//
// Apply returns an error if any edit is out of bounds,
// or if any pair of edits is overlapping.
func Apply(src string, edits []Edit) (string, error) {
edits, size, err := validate(src, edits)
if err != nil {
return "", err
}
// Apply edits.
out := make([]byte, 0, size)
lastEnd := 0
for _, edit := range edits {
if lastEnd < edit.Start {
out = append(out, src[lastEnd:edit.Start]...)
}
out = append(out, edit.New...)
lastEnd = edit.End
}
out = append(out, src[lastEnd:]...)
if len(out) != size {
panic("wrong size")
}
return string(out), nil
}
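// Editor's note: a usage sketch for Apply (hypothetical; it would live in
// a _test.go file and is not part of the vendored source).
func ExampleApply() {
	s, _ := Apply("hello", []Edit{
		{Start: 2, End: 3, New: ""},  // delete the first "l"
		{Start: 5, End: 5, New: "!"}, // insert at end of input
	})
	fmt.Println(s)
	// Output: helo!
}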
// ApplyBytes is like Apply, but it accepts a byte slice.
// The result is always a new array.
func ApplyBytes(src []byte, edits []Edit) ([]byte, error) {
res, err := Apply(string(src), edits)
return []byte(res), err
}
// validate checks that edits are consistent with src,
// and returns the size of the patched output.
// It may return a different slice.
func validate(src string, edits []Edit) ([]Edit, int, error) {
if !sort.IsSorted(editsSort(edits)) {
edits = slices.Clone(edits)
SortEdits(edits)
}
// Check validity of edits and compute final size.
size := len(src)
lastEnd := 0
for _, edit := range edits {
if !(0 <= edit.Start && edit.Start <= edit.End && edit.End <= len(src)) {
return nil, 0, fmt.Errorf("diff has out-of-bounds edits")
}
if edit.Start < lastEnd {
return nil, 0, fmt.Errorf("diff has overlapping edits")
}
size += len(edit.New) + edit.Start - edit.End
lastEnd = edit.End
}
return edits, size, nil
}
// SortEdits orders a slice of Edits by (start, end) offset.
// This ordering puts insertions (end = start) before deletions
// (end > start) at the same point, but uses a stable sort to preserve
// the order of multiple insertions at the same point.
// (Apply detects multiple deletions at the same point as an error.)
func SortEdits(edits []Edit) {
sort.Stable(editsSort(edits))
}
type editsSort []Edit
func (a editsSort) Len() int { return len(a) }
func (a editsSort) Less(i, j int) bool {
if cmp := a[i].Start - a[j].Start; cmp != 0 {
return cmp < 0
}
return a[i].End < a[j].End
}
func (a editsSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
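// Editor's note: a small sketch of the ordering (hypothetical, not part
// of the vendored source). Ties on Start are broken by End, so an
// insertion at an offset sorts before a deletion at the same offset:
//
//	edits := []Edit{
//		{Start: 3, End: 4, New: ""},  // deletion at offset 3
//		{Start: 3, End: 3, New: "x"}, // insertion at offset 3
//	}
//	SortEdits(edits)
//	// edits[0] is now the insertion {3,3,"x"}; the deletion follows it.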
// lineEdits expands and merges a sequence of edits so that each
// resulting edit replaces one or more complete lines.
// See Apply for preconditions.
func lineEdits(src string, edits []Edit) ([]Edit, error) {
edits, _, err := validate(src, edits)
if err != nil {
return nil, err
}
// Do all deletions begin and end at the start of a line,
// and all insertions end with a newline?
// (This is merely a fast path.)
for _, edit := range edits {
if edit.Start >= len(src) || // insertion at EOF
edit.Start > 0 && src[edit.Start-1] != '\n' || // not at line start
edit.End > 0 && src[edit.End-1] != '\n' || // not at line start
edit.New != "" && edit.New[len(edit.New)-1] != '\n' { // partial insert
goto expand // slow path
}
}
return edits, nil // aligned
expand:
if len(edits) == 0 {
return edits, nil // no edits (unreachable due to fast path)
}
expanded := make([]Edit, 0, len(edits)) // a guess
prev := edits[0]
// TODO(adonovan): opt: start from the first misaligned edit.
// TODO(adonovan): opt: avoid quadratic cost of string += string.
for _, edit := range edits[1:] {
between := src[prev.End:edit.Start]
if !strings.Contains(between, "\n") {
// overlapping lines: combine with previous edit.
prev.New += between + edit.New
prev.End = edit.End
} else {
// non-overlapping lines: flush previous edit.
expanded = append(expanded, expandEdit(prev, src))
prev = edit
}
}
return append(expanded, expandEdit(prev, src)), nil // flush final edit
}
// expandEdit returns edit expanded to complete whole lines.
func expandEdit(edit Edit, src string) Edit {
// Expand start left to start of line.
// (delta is the zero-based column number of start.)
start := edit.Start
if delta := start - 1 - strings.LastIndex(src[:start], "\n"); delta > 0 {
edit.Start -= delta
edit.New = src[start-delta:start] + edit.New
}
// Expand end right to end of line.
end := edit.End
if end > 0 && src[end-1] != '\n' ||
edit.New != "" && edit.New[len(edit.New)-1] != '\n' {
if nl := strings.IndexByte(src[end:], '\n'); nl < 0 {
edit.End = len(src) // extend to EOF
} else {
edit.End = end + nl + 1 // extend beyond \n
}
}
edit.New += src[end:edit.End]
return edit
}


@@ -0,0 +1,179 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lcs
import (
"log"
"sort"
)
// lcs is a longest common subsequence
type lcs []diag
// A diag is a piece of the edit graph where A[X+i] == B[Y+i], for 0<=i<Len.
// All computed diagonals are parts of a longest common subsequence.
type diag struct {
X, Y int
Len int
}
// sort sorts in place, by lowest X, and if tied, inversely by Len
func (l lcs) sort() lcs {
sort.Slice(l, func(i, j int) bool {
if l[i].X != l[j].X {
return l[i].X < l[j].X
}
return l[i].Len > l[j].Len
})
return l
}
// validate that the elements of the lcs do not overlap
// (can only happen when the two-sided algorithm ends early)
// expects the lcs to be sorted
func (l lcs) valid() bool {
for i := 1; i < len(l); i++ {
if l[i-1].X+l[i-1].Len > l[i].X {
return false
}
if l[i-1].Y+l[i-1].Len > l[i].Y {
return false
}
}
return true
}
// repair overlapping lcs
// only called if two-sided stops early
func (l lcs) fix() lcs {
// from the set of diagonals in l, find a maximal non-conflicting set
// this problem may be NP-complete, but we use a greedy heuristic,
// which is quadratic, but with a better data structure, could be D log D.
// independent is not enough: {0,3,1} and {3,0,2} can't both occur in an lcs
// which has to have monotone x and y
if len(l) == 0 {
return nil
}
sort.Slice(l, func(i, j int) bool { return l[i].Len > l[j].Len })
tmp := make(lcs, 0, len(l))
tmp = append(tmp, l[0])
for i := 1; i < len(l); i++ {
var dir direction
nxt := l[i]
for _, in := range tmp {
if dir, nxt = overlap(in, nxt); dir == empty || dir == bad {
break
}
}
if nxt.Len > 0 && dir != bad {
tmp = append(tmp, nxt)
}
}
tmp.sort()
if false && !tmp.valid() { // debug checking
log.Fatalf("here %d", len(tmp))
}
return tmp
}
type direction int
const (
empty direction = iota // diag is empty (so not in lcs)
leftdown // proposed diag is acceptably to the left and below
rightup // proposed diag is acceptably to the right and above
bad // proposed diag is inconsistent with the lcs so far
)
// overlap trims the proposed diag prop so it doesn't overlap with
// the existing diag that has already been added to the lcs.
func overlap(exist, prop diag) (direction, diag) {
if prop.X <= exist.X && exist.X < prop.X+prop.Len {
// remove the end of prop where it overlaps with the X end of exist
delta := prop.X + prop.Len - exist.X
prop.Len -= delta
if prop.Len <= 0 {
return empty, prop
}
}
if exist.X <= prop.X && prop.X < exist.X+exist.Len {
// remove the beginning of prop where it overlaps with exist
delta := exist.X + exist.Len - prop.X
prop.Len -= delta
if prop.Len <= 0 {
return empty, prop
}
prop.X += delta
prop.Y += delta
}
if prop.Y <= exist.Y && exist.Y < prop.Y+prop.Len {
// remove the end of prop that overlaps (in Y) with exist
delta := prop.Y + prop.Len - exist.Y
prop.Len -= delta
if prop.Len <= 0 {
return empty, prop
}
}
if exist.Y <= prop.Y && prop.Y < exist.Y+exist.Len {
// remove the beginning of prop that overlaps with exist
delta := exist.Y + exist.Len - prop.Y
prop.Len -= delta
if prop.Len <= 0 {
return empty, prop
}
prop.X += delta // no test reaches this code
prop.Y += delta
}
if prop.X+prop.Len <= exist.X && prop.Y+prop.Len <= exist.Y {
return leftdown, prop
}
if exist.X+exist.Len <= prop.X && exist.Y+exist.Len <= prop.Y {
return rightup, prop
}
// prop can't be in an lcs that contains exist
return bad, prop
}
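// Editor's note: a worked example of the trimming above (hypothetical,
// not part of the vendored source). With exist = diag{X:5, Y:5, Len:3}
// and prop = diag{X:3, Y:9, Len:4}, the first clause trims prop to Len 2;
// the trimmed diag is then left of exist in X but above it in Y, which is
// inconsistent with a monotone lcs, so the result is bad:
//
//	dir, trimmed := overlap(diag{X: 5, Y: 5, Len: 3}, diag{X: 3, Y: 9, Len: 4})
//	// dir == bad, trimmed == diag{X: 3, Y: 9, Len: 2}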
// manipulating Diag and lcs
// prepend a diagonal (x,y)-(x+1,y+1) segment either to an empty lcs
// or to its first Diag. prepend is only called to extend diagonals
// in the backward direction.
func (lcs lcs) prepend(x, y int) lcs {
if len(lcs) > 0 {
d := &lcs[0]
if int(d.X) == x+1 && int(d.Y) == y+1 {
// extend the diagonal down and to the left
d.X, d.Y = int(x), int(y)
d.Len++
return lcs
}
}
r := diag{X: int(x), Y: int(y), Len: 1}
lcs = append([]diag{r}, lcs...)
return lcs
}
// append appends a diagonal, or extends the existing one,
// by adding the edge (x,y)-(x+1,y+1). append is only called
// to extend diagonals in the forward direction.
func (lcs lcs) append(x, y int) lcs {
if len(lcs) > 0 {
last := &lcs[len(lcs)-1]
// Expand last element if adjoining.
if last.X+last.Len == x && last.Y+last.Len == y {
last.Len++
return lcs
}
}
return append(lcs, diag{X: x, Y: y, Len: 1})
}
// enforce constraint on d, k
func ok(d, k int) bool {
return d >= 0 && -d <= k && k <= d
}
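// Editor's note: a sketch of how append coalesces adjacent edges into a
// single diagonal (hypothetical, not part of the vendored source):
//
//	var l lcs
//	l = l.append(3, 5) // lcs{{X:3, Y:5, Len:1}}
//	l = l.append(4, 6) // adjoining edge: extends to lcs{{X:3, Y:5, Len:2}}
//	l = l.append(9, 9) // not adjoining: starts a new diag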

Some files were not shown because too many files have changed in this diff.