Mirror of https://github.com/golang/go.git, synced 2025-12-08 06:10:04 +00:00.
[dev.simd] all: merge master (cf5e993) into dev.simd
Merge List:

+ 2025-09-11 cf5e993177 cmd/link: allow one to specify the data section in the internal linker
+ 2025-09-11 cdb3d467fa encoding/gob: make use of reflect.TypeAssert
+ 2025-09-11 fef360964c archive/tar: fix typo in benchmark name
+ 2025-09-11 7d562b8460 syscall: actually remove unreachable code
+ 2025-09-11 c349582344 crypto/rsa: don't test CL 687836 against v1.0.0 FIPS 140-3 module
+ 2025-09-11 253dd08f5d debug/macho: filter non-external symbols when reading imported symbols without LC_DYSYMTAB
+ 2025-09-10 2009e6c596 internal/runtime/maps: remove redundant package docs
+ 2025-09-10 de5d7eccb9 runtime/internal/maps: only conditionally clear groups when sparse
+ 2025-09-10 8098b99547 internal/runtime/maps: speed up Clear
+ 2025-09-10 fe5420b054 cmd: delete some more windows/arm remnants
+ 2025-09-10 fad1dc608d runtime: don't artificially limit TestReadMetricsSched
+ 2025-09-10 b1f3e38e41 cmd/compile: when CSEing two values, prefer the statement marked one
+ 2025-09-10 00824f5ff5 types2: better documentation for resolve()
+ 2025-09-10 5cf8ca42e3 internal/trace/raw: use strings.Cut instead of strings.SplitN
+ 2025-09-10 80a2aae922 Revert "cmd/compile: improve stp merging for non-sequent cases"
+ 2025-09-10 f327a05419 go/token, syscall: annotate if blocks that defeat vet's unreachable pass
+ 2025-09-10 9650c97d0f syscall: remove unreachable code
+ 2025-09-10 f1c4b860d4 Revert "crypto/internal/fips140: update frozen module version to "v1.0.0""
+ 2025-09-10 30686c4cc8 encoding/json/v2: document context annotation with SemanticError
+ 2025-09-09 c5737dc21b runtime: when using cgo on 386, call C sigaction function
+ 2025-09-09 b9a4a09b0f runtime: remove duff support for riscv64
+ 2025-09-09 4dac9e093f cmd/compile: use generated loops instead of DUFFCOPY on riscv64
+ 2025-09-09 879ff736d3 cmd/compile: use generated loops instead of DUFFZERO on riscv64
+ 2025-09-09 77643dc63f cmd/compile: simplify zerorange on riscv64
+ 2025-09-09 e6605a1bcc encoding/json: use reflect.TypeAssert
+ 2025-09-09 4c20f7f15a cmd/cgo: run gcc to get errors and debug info in parallel
+ 2025-09-09 5dcedd6550 runtime: lock mheap_.speciallock when allocating synctest specials
+ 2025-09-09 d3be949ada runtime: don't negate eventfd errno
+ 2025-09-09 836fa74518 syscall: optimise cgo clearenv
+ 2025-09-09 ce39174482 crypto/rsa: check PrivateKey.D for consistency with Dp and Dq
+ 2025-09-09 5d9d0513dc crypto/rsa: check for post-Precompute changes in Validate
+ 2025-09-09 968a5107a9 crypto/internal/fips140: update frozen module version to "v1.0.0"
+ 2025-09-09 645ee44492 crypto/ecdsa: deprecate direct use of big.Int fields in keys
+ 2025-09-09 a67977da5e cmd/compile/internal/inline: ignore superfluous slicing
+ 2025-09-09 a5fa5ea51c cmd/compile/internal/ssa: expand runtime.memequal for length {3,5,6,7}
+ 2025-09-09 4c63d798cb cmd/compile: improve stp merging for non-sequent cases
+ 2025-09-09 bdd51e7855 cmd/compile: use constant zero register instead of specialized zero instructions on mips64x
+ 2025-09-09 10ac80de77 cmd/compile: introduce CCMP generation
+ 2025-09-09 3b3b16957c Revert "cmd/go: use os.Rename to move files on Windows"
+ 2025-09-09 e3223518b8 cmd/go: split generating cover files into its own action
+ 2025-09-09 af03343f93 cmd/compile: fix bounds check report
+ 2025-09-08 6447ff409a cmd/compile: fold constant in ADDshift op on loong64
+ 2025-09-08 5b218461f9 cmd/compile: optimize loads from abi.Type.{Size_,PtrBytes,Kind_}
+ 2025-09-08 b915e14490 cmd/compile: consolidate logic for rewriting fixed loads
+ 2025-09-08 06e791c0cd cmd/compile: simplify zerorange on mips
+ 2025-09-08 cf42b785b7 cmd/cgo: run recordTypes for each of the debugs at the end of Translate
+ 2025-09-08 5e6296f3f8 archive/tar: optimize nanosecond parsing in parsePAXTime
+ 2025-09-08 ea00650784 debug/pe: permit symbols with no name
+ 2025-09-08 4cc7cc74c3 crypto: update Hash comments to point to crypto/sha3
+ 2025-09-08 ff45d5d53c encoding/json/internal/jsonflags: fix comment with wrong field name
+ 2025-09-06 861c90c907 net/http: pool transport gzip readers
+ 2025-09-06 57769b5532 os: reject OpenDir of a non-directory file in Plan 9
+ 2025-09-06 a6144613d3 crypto/tls: use context.AfterFunc in handshakeContext
+ 2025-09-05 e8126bce9e runtime/cgo: save and restore R31 for crosscall1 on loong64
+ 2025-09-05 d767064170 cmd/compile: mark abi.PtrType.Elem sym as used
+ 2025-09-05 0b1eed09a3 vendor/golang.org/x/tools: update to a09a2fb
+ 2025-09-05 f5b20689e9 cmd/compile: optimize loads from readonly globals into constants on loong64
+ 2025-09-05 3492e4262b cmd/compile: simplify specific addition operations using the ADDV16 instruction
+ 2025-09-05 459b85ccaa cmd/fix: remove all functionality except for buildtag
+ 2025-09-05 87e72769fa runtime: simplify openbsd check in usesLibcall and mStackIsSystemAllocated
+ 2025-09-05 bb48272e24 cmd/compile: simplify zerorange on mips64
+ 2025-09-05 d52a56cce1 cmd/link/internal/ld: unconditionally use posix_fallocate on FreeBSD
+ 2025-09-04 9d0829963c net/http: fix cookie value of "" being interpreted as empty string.
+ 2025-09-04 ddce0522be cmd/internal/obj/loong64: add ADDU16I.D instruction support
+ 2025-09-04 00b8474e47 cmd/trace: don't filter events for profile by whether they have stack
+ 2025-09-04 e36c5aead6 log/slog: add multiple handlers support for logger
+ 2025-09-04 150fae714e crypto/x509: don't force system roots load in SetFallbackRoots
+ 2025-09-04 4f7bbc62c7 runtime, cmd/compile, cmd/internal/obj: remove duff support for loong64
+ 2025-09-04 b8cc907425 cmd/internal/obj/loong64: fix the usage of offset in the instructions [X]VLDREPL.{B/H/W/D}
+ 2025-09-04 8c27a80890 path{,/filepath}: speed up Match
+ 2025-09-04 b7c20413c5 runtime: remove obsolete osArchInit function
+ 2025-09-04 df29038486 cmd/compile/internal/ssa: load constant values from abi.PtrType.Elem
+ 2025-09-04 4373754bc9 cmd/compile: add store to load forwarding rules on riscv64
+ 2025-09-03 80038586ed cmd/compile: export to DWARF types only referenced through interfaces
+ 2025-09-03 91e76a513b cmd/compile: use generated loops instead of DUFFCOPY on loong64
+ 2025-09-03 c552ad913f cmd/compile: simplify memory load and store operations on loong64
+ 2025-09-03 e8f9127d1f net/netip: export Prefix.Compare, fix ordering
+ 2025-09-03 731e546166 cmd/compile: simplify the support for 32bit high multiply on loong64

Change-Id: I2c124fb8071e2972d39804867cafb6806e601aba
This commit is contained in: commit 9a349f8e72

204 changed files with 7284 additions and 5911 deletions
api/next/61642.txt (new file, 1 line)
@@ -0,0 +1 @@
+pkg net/netip, method (Prefix) Compare(Prefix) int #61642
api/next/63963.txt (new file, 3 lines)
@@ -0,0 +1,3 @@
+pkg crypto/ecdsa, type PrivateKey struct, D //deprecated #63963
+pkg crypto/ecdsa, type PublicKey struct, X //deprecated #63963
+pkg crypto/ecdsa, type PublicKey struct, Y //deprecated #63963
api/next/65954.txt (new file, 6 lines)
@@ -0,0 +1,6 @@
+pkg log/slog, func NewMultiHandler(...Handler) *MultiHandler #65954
+pkg log/slog, method (*MultiHandler) Enabled(context.Context, Level) bool #65954
+pkg log/slog, method (*MultiHandler) Handle(context.Context, Record) error #65954
+pkg log/slog, method (*MultiHandler) WithAttrs([]Attr) Handler #65954
+pkg log/slog, method (*MultiHandler) WithGroup(string) Handler #65954
+pkg log/slog, type MultiHandler struct #65954
doc/next/6-stdlib/99-minor/crypto/ecdsa/63963.md (new file, 1 line)
@@ -0,0 +1 @@
+The `big.Int` fields of [PublicKey] and [PrivateKey] are now deprecated.
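For readers migrating off the deprecated fields, a minimal sketch (not part of the commit) of one supported alternative: instead of reading PublicKey.X and PublicKey.Y directly, convert the key with PublicKey.ECDH and use its byte encoding.

    package main

    import (
        "crypto/ecdsa"
        "crypto/elliptic"
        "crypto/rand"
        "fmt"
    )

    func main() {
        priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
        if err != nil {
            panic(err)
        }
        // PublicKey.ECDH returns a *ecdh.PublicKey whose Bytes method
        // yields the standard uncompressed point encoding, so the
        // big.Int coordinates are never touched.
        pub, err := priv.PublicKey.ECDH()
        if err != nil {
            panic(err)
        }
        fmt.Printf("uncompressed point: %x\n", pub.Bytes())
    }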
doc/next/6-stdlib/99-minor/crypto/rsa/74115.md (new file, 5 lines)
@@ -0,0 +1,5 @@
+If [PrivateKey] fields are modified after calling [PrivateKey.Precompute],
+[PrivateKey.Validate] now fails.
+
+[PrivateKey.D] is now checked for consistency with precomputed values, even if
+it is not used.
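A minimal sketch (not from the commit) of the newly rejected pattern described above: mutating a precomputed value and expecting Validate to catch the inconsistency.

    package main

    import (
        "crypto/rand"
        "crypto/rsa"
        "fmt"
        "math/big"
    )

    func main() {
        key, err := rsa.GenerateKey(rand.Reader, 2048)
        if err != nil {
            panic(err)
        }
        key.Precompute()
        // Tamper with a precomputed CRT exponent. N, D, and the primes
        // still agree with each other, but the precomputed values no
        // longer match; with this change Validate should reject the key.
        key.Precomputed.Dp = new(big.Int).Add(key.Precomputed.Dp, big.NewInt(2))
        if err := key.Validate(); err != nil {
            fmt.Println("Validate now fails:", err)
        }
    }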
doc/next/6-stdlib/99-minor/log/slog/65954.md (new file, 6 lines)
@@ -0,0 +1,6 @@
+The [`NewMultiHandler`](/pkg/log/slog#NewMultiHandler) function creates a
+[`MultiHandler`](/pkg/log/slog#MultiHandler) that invokes all the given Handlers.
+Its `Enabled` method reports whether any of the handlers' `Enabled` methods
+return true.
+Its `Handle`, `WithAttrs` and `WithGroup` methods call the corresponding method
+on each of the enabled handlers.
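A minimal usage sketch based on the signatures recorded in api/next/65954.txt above (illustrative, not part of the commit):

    package main

    import (
        "log/slog"
        "os"
    )

    func main() {
        // Fan each record out to a human-readable text handler and a
        // machine-readable JSON handler.
        h := slog.NewMultiHandler(
            slog.NewTextHandler(os.Stderr, nil),
            slog.NewJSONHandler(os.Stdout, nil),
        )
        slog.New(h).Info("server started", "port", 8080)
    }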
doc/next/6-stdlib/99-minor/net/netip/61642.md (new file, 1 line)
@@ -0,0 +1 @@
+The new [Prefix.Compare] method compares two prefixes.
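A small sketch (not from the commit) of the method's shape; because Compare returns an int ordering, the method expression composes directly with slices.SortFunc:

    package main

    import (
        "fmt"
        "net/netip"
        "slices"
    )

    func main() {
        prefixes := []netip.Prefix{
            netip.MustParsePrefix("10.0.0.0/16"),
            netip.MustParsePrefix("10.0.0.0/8"),
            netip.MustParsePrefix("192.168.1.0/24"),
        }
        // netip.Prefix.Compare has type func(netip.Prefix, netip.Prefix) int.
        slices.SortFunc(prefixes, netip.Prefix.Compare)
        fmt.Println(prefixes)
    }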
@@ -213,15 +213,17 @@ func parsePAXTime(s string) (time.Time, error) {
 	}
 
 	// Parse the nanoseconds.
-	if strings.Trim(sn, "0123456789") != "" {
-		return time.Time{}, ErrHeader
-	}
-	if len(sn) < maxNanoSecondDigits {
-		sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
-	} else {
-		sn = sn[:maxNanoSecondDigits] // Right truncate
-	}
-	nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
+	// Initialize an array with '0's to handle right padding automatically.
+	nanoDigits := [maxNanoSecondDigits]byte{'0', '0', '0', '0', '0', '0', '0', '0', '0'}
+	for i := range len(sn) {
+		switch c := sn[i]; {
+		case c < '0' || c > '9':
+			return time.Time{}, ErrHeader
+		case i < len(nanoDigits):
+			nanoDigits[i] = c
+		}
+	}
+	nsecs, _ := strconv.ParseInt(string(nanoDigits[:]), 10, 64) // Must succeed after validation
 	if len(ss) > 0 && ss[0] == '-' {
 		return time.Unix(secs, -1*nsecs), nil // Negative correction
 	}
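The hunk above replaces a validate-then-pad pipeline (strings.Trim plus strings.Repeat) with a single pass that writes digits into a fixed-size, zero-prefilled buffer, so right padding and truncation fall out for free. A standalone sketch of the same technique (names here are illustrative, not from the Go tree):

    package main

    import (
        "fmt"
        "strconv"
    )

    const maxDigits = 9 // nanoseconds carry at most nine decimal digits

    // parseNanos validates, right-pads, and truncates a fractional-second
    // string to nine digits in a single pass over the input.
    func parseNanos(sn string) (int64, error) {
        digits := [maxDigits]byte{'0', '0', '0', '0', '0', '0', '0', '0', '0'}
        for i := 0; i < len(sn); i++ {
            switch c := sn[i]; {
            case c < '0' || c > '9':
                return 0, fmt.Errorf("invalid digit %q", c)
            case i < len(digits):
                digits[i] = c
            }
        }
        n, _ := strconv.ParseInt(string(digits[:]), 10, 64) // digits are validated above
        return n, nil
    }

    func main() {
        fmt.Println(parseNanos("123"))          // 123000000 <nil>
        fmt.Println(parseNanos("123456789123")) // 123456789 <nil> (truncated)
    }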
@@ -439,3 +439,66 @@ func TestFormatPAXRecord(t *testing.T) {
 		}
 	}
 }
+
+func BenchmarkParsePAXTime(b *testing.B) {
+	tests := []struct {
+		name string
+		in   string
+		want time.Time
+		ok   bool
+	}{
+		{
+			name: "NoNanos",
+			in:   "123456",
+			want: time.Unix(123456, 0),
+			ok:   true,
+		},
+		{
+			name: "ExactNanos",
+			in:   "1.123456789",
+			want: time.Unix(1, 123456789),
+			ok:   true,
+		},
+		{
+			name: "WithNanoPadding",
+			in:   "1.123",
+			want: time.Unix(1, 123000000),
+			ok:   true,
+		},
+		{
+			name: "WithNanoTruncate",
+			in:   "1.123456789123",
+			want: time.Unix(1, 123456789),
+			ok:   true,
+		},
+		{
+			name: "TrailingError",
+			in:   "1.123abc",
+			want: time.Time{},
+			ok:   false,
+		},
+		{
+			name: "LeadingError",
+			in:   "1.abc123",
+			want: time.Time{},
+			ok:   false,
+		},
+	}
+	for _, tt := range tests {
+		b.Run(tt.name, func(b *testing.B) {
+			b.ReportAllocs()
+			for b.Loop() {
+				ts, err := parsePAXTime(tt.in)
+				if (err == nil) != tt.ok {
+					if err != nil {
+						b.Fatal(err)
+					}
+					b.Fatal("expected error")
+				}
+				if !ts.Equal(tt.want) {
+					b.Fatalf("time mismatch: got %v, want %v", ts, tt.want)
+				}
+			}
+		})
+	}
+}
src/cmd/asm/internal/asm/testdata/loong64enc1.s (vendored, 35 lines changed)
@@ -282,6 +282,13 @@ lable2:
 	MOVVP	4(R5), R4	// a4040026
 	MOVVP	(R5), R4	// a4000026
 
+	// ADDU16I.D instruction
+	ADDV16	$(-32768<<16), R4, R5	// ADDV16 $-2147483648, R4, R5 // 85000012
+	ADDV16	$(0<<16), R4, R5	// ADDV16 $0, R4, R5 // 85000010
+	ADDV16	$(8<<16), R4, R5	// ADDV16 $524288, R4, R5 // 85200010
+	ADDV16	$(32767<<16), R4, R5	// ADDV16 $2147418112, R4, R5 // 85fcff11
+	ADDV16	$(16<<16), R4	// ADDV16 $1048576, R4 // 84400010
+
 	// Loong64 atomic memory access instructions
 	AMSWAPB	R14, (R13), R12	// ac395c38
 	AMSWAPH	R14, (R13), R12	// acb95c38
@@ -538,13 +545,29 @@ lable2:
 
 	// Load data from memory and broadcast to each element of a vector register: VMOVQ offset(Rj), <Vd>.<T>
 	VMOVQ	(R4), V0.B16	// 80008030
-	VMOVQ	1(R4), V1.H8	// 81044030
-	VMOVQ	2(R4), V2.W4	// 82082030
-	VMOVQ	3(R4), V3.V2	// 830c1030
+	VMOVQ	1(R4), V0.B16	// 80048030
+	VMOVQ	-3(R4), V0.B16	// 80f4bf30
+	VMOVQ	(R4), V1.H8	// 81004030
+	VMOVQ	2(R4), V1.H8	// 81044030
+	VMOVQ	-6(R4), V1.H8	// 81f45f30
+	VMOVQ	(R4), V2.W4	// 82002030
+	VMOVQ	8(R4), V2.W4	// 82082030
+	VMOVQ	-12(R4), V2.W4	// 82f42f30
+	VMOVQ	(R4), V3.V2	// 83001030
+	VMOVQ	24(R4), V3.V2	// 830c1030
+	VMOVQ	-16(R4), V3.V2	// 83f81730
 	XVMOVQ	(R4), X0.B32	// 80008032
-	XVMOVQ	1(R4), X1.H16	// 81044032
-	XVMOVQ	2(R4), X2.W8	// 82082032
-	XVMOVQ	3(R4), X3.V4	// 830c1032
+	XVMOVQ	1(R4), X0.B32	// 80048032
+	XVMOVQ	-5(R4), X0.B32	// 80ecbf32
+	XVMOVQ	(R4), X1.H16	// 81004032
+	XVMOVQ	2(R4), X1.H16	// 81044032
+	XVMOVQ	-10(R4), X1.H16	// 81ec5f32
+	XVMOVQ	(R4), X2.W8	// 82002032
+	XVMOVQ	8(R4), X2.W8	// 82082032
+	XVMOVQ	-20(R4), X2.W8	// 82ec2f32
+	XVMOVQ	(R4), X3.V4	// 83001032
+	XVMOVQ	24(R4), X3.V4	// 830c1032
+	XVMOVQ	-24(R4), X3.V4	// 83f41732
 
 	// VSEQ{B,H,W,V}, XVSEQ{B,H,W,V} instruction
 	VSEQB	V1, V2, V3	// 43040070
@@ -5,3 +5,5 @@
 TEXT errors(SB),$0
 	VSHUF4IV	$16, V1, V2	// ERROR "operand out of range 0 to 15"
 	XVSHUF4IV	$16, X1, X2	// ERROR "operand out of range 0 to 15"
+	ADDV16	$1, R4, R5	// ERROR "the constant must be a multiple of 65536."
+	ADDV16	$65535, R4, R5	// ERROR "the constant must be a multiple of 65536."
@@ -183,18 +183,16 @@ func splitQuoted(s string) (r []string, err error) {
 	return args, err
 }
 
-// Translate rewrites f.AST, the original Go input, to remove
-// references to the imported package C, replacing them with
-// references to the equivalent Go types, functions, and variables.
-func (p *Package) Translate(f *File) {
+// loadDebug runs gcc to load debug information for the File. The debug
+// information will be saved to the debugs field of the file, and be
+// processed when Translate is called on the file later.
+// loadDebug is called concurrently with different files.
+func (f *File) loadDebug(p *Package) {
 	for _, cref := range f.Ref {
 		// Convert C.ulong to C.unsigned long, etc.
 		cref.Name.C = cname(cref.Name.Go)
 	}
 
-	var conv typeConv
-	conv.Init(p.PtrSize, p.IntSize)
-
 	ft := fileTypedefs{typedefs: make(map[string]bool)}
 	numTypedefs := -1
 	for len(ft.typedefs) > numTypedefs {
@@ -213,8 +211,7 @@ func (p *Package) Translate(f *File) {
 		}
 		needType := p.guessKinds(f)
 		if len(needType) > 0 {
-			d := p.loadDWARF(f, &ft, needType)
-			p.recordTypes(f, d, &conv)
+			f.debugs = append(f.debugs, p.loadDWARF(f, &ft, needType))
 		}
 
 		// In godefs mode we're OK with the typedefs, which
@@ -224,6 +221,18 @@ func (p *Package) Translate(f *File) {
 			break
 		}
 	}
 }
 
+// Translate rewrites f.AST, the original Go input, to remove
+// references to the imported package C, replacing them with
+// references to the equivalent Go types, functions, and variables.
+// Preconditions: File.loadDebug must be called prior to translate.
+func (p *Package) Translate(f *File) {
+	var conv typeConv
+	conv.Init(p.PtrSize, p.IntSize)
+	for _, d := range f.debugs {
+		p.recordTypes(f, d, &conv)
+	}
 	p.prepareNames(f)
 	if p.rewriteCalls(f) {
 		// Add `import _cgo_unsafe "unsafe"` after the package statement.
@@ -280,6 +289,7 @@ func (f *File) loadDefines(gccOptions []string) bool {
 // guessKinds tricks gcc into revealing the kind of each
 // name xxx for the references C.xxx in the Go input.
 // The kind is either a constant, type, or variable.
+// guessKinds is called concurrently with different files.
 func (p *Package) guessKinds(f *File) []*Name {
 	// Determine kinds for names we already know about,
 	// like #defines or 'struct foo', before bothering with gcc.
@@ -523,6 +533,7 @@ func (p *Package) guessKinds(f *File) []*Name {
 // loadDWARF parses the DWARF debug information generated
 // by gcc to learn the details of the constants, variables, and types
 // being referred to as C.xxx.
+// loadDwarf is called concurrently with different files.
 func (p *Package) loadDWARF(f *File, ft *fileTypedefs, names []*Name) *debug {
 	// Extract the types from the DWARF section of an object
 	// from a well-formed C program. Gcc only generates DWARF info
@@ -1786,6 +1797,7 @@ func gccTmp() string {
 
 // gccCmd returns the gcc command line to use for compiling
 // the input.
+// gccCommand is called concurrently for different files.
 func (p *Package) gccCmd(ofile string) []string {
 	c := append(gccBaseCmd,
 		"-w", // no warnings
@@ -1829,6 +1841,7 @@ func (p *Package) gccCmd(ofile string) []string {
 
 // gccDebug runs gcc -gdwarf-2 over the C program stdin and
 // returns the corresponding DWARF data and, if present, debug data block.
+// gccDebug is called concurrently with different C programs.
 func (p *Package) gccDebug(stdin []byte, nnames int) (d *dwarf.Data, ints []int64, floats []float64, strs []string) {
 	ofile := gccTmp()
 	runGcc(stdin, p.gccCmd(ofile))
@@ -2219,6 +2232,7 @@ func gccDefines(stdin []byte, gccOptions []string) string {
 // gccErrors runs gcc over the C program stdin and returns
 // the errors that gcc prints. That is, this function expects
 // gcc to fail.
+// gccErrors is called concurrently with different C programs.
 func (p *Package) gccErrors(stdin []byte, extraArgs ...string) string {
 	// TODO(rsc): require failure
 	args := p.gccCmd(gccTmp())
@@ -30,6 +30,7 @@ import (
 	"cmd/internal/edit"
 	"cmd/internal/hash"
 	"cmd/internal/objabi"
+	"cmd/internal/par"
 	"cmd/internal/telemetry/counter"
 )
 
@@ -74,6 +75,8 @@ type File struct {
 	NoCallbacks map[string]bool // C function names that with #cgo nocallback directive
 	NoEscapes   map[string]bool // C function names that with #cgo noescape directive
 	Edit        *edit.Buffer
+
+	debugs []*debug // debug data from iterations of gccDebug. Initialized by File.loadDebug.
 }
 
 func (f *File) offset(p token.Pos) int {
@@ -391,7 +394,7 @@ func main() {
 	h := hash.New32()
 	io.WriteString(h, *importPath)
 
-	var wg sync.WaitGroup
+	q := par.NewQueue(runtime.GOMAXPROCS(0))
 	fs := make([]*File, len(goFiles))
 	for i, input := range goFiles {
 		if *srcDir != "" {
@@ -413,9 +416,7 @@ func main() {
 			fatalf("%s", err)
 		}
 
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
+		q.Add(func() {
 			// Apply trimpath to the file path. The path won't be read from after this point.
 			input, _ = objabi.ApplyRewrites(input, *trimpath)
 			if strings.ContainsAny(input, "\r\n") {
@@ -436,10 +437,12 @@ func main() {
 			})
 
 			fs[i] = f
-		}()
+
+			f.loadDebug(p)
+		})
 	}
 
-	wg.Wait()
+	<-q.Idle()
 
 	cPrefix = fmt.Sprintf("_%x", h.Sum(nil)[0:6])
@@ -1370,7 +1370,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		}
 	case ssa.OpAMD64LoweredPanicBoundsCR:
 		yIsReg = true
-		yVal := int(v.Args[0].Reg() - x86.REG_AX)
+		yVal = int(v.Args[0].Reg() - x86.REG_AX)
 		c := v.Aux.(ssa.PanicBoundsC).C
 		if c >= 0 && c <= abi.BoundsMaxConst {
 			xVal = int(c)
@@ -777,7 +777,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		}
 	case ssa.OpARMLoweredPanicBoundsCR:
 		yIsReg = true
-		yVal := int(v.Args[0].Reg() - arm.REG_R0)
+		yVal = int(v.Args[0].Reg() - arm.REG_R0)
 		c := v.Aux.(ssa.PanicBoundsC).C
 		if c >= 0 && c <= abi.BoundsMaxConst {
 			xVal = int(c)
@@ -1050,6 +1050,27 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.From.Offset = int64(condCode)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
+	case ssa.OpARM64CCMP,
+		ssa.OpARM64CCMN,
+		ssa.OpARM64CCMPconst,
+		ssa.OpARM64CCMNconst,
+		ssa.OpARM64CCMPW,
+		ssa.OpARM64CCMNW,
+		ssa.OpARM64CCMPWconst,
+		ssa.OpARM64CCMNWconst:
+		p := s.Prog(v.Op.Asm())
+		p.Reg = v.Args[0].Reg()
+		params := v.AuxArm64ConditionalParams()
+		p.From.Type = obj.TYPE_SPECIAL // assembler encodes conditional bits in Offset
+		p.From.Offset = int64(condBits[params.Cond()])
+		constValue, ok := params.ConstValue()
+		if ok {
+			p.AddRestSourceConst(constValue)
+		} else {
+			p.AddRestSourceReg(v.Args[1].Reg())
+		}
+		p.To.Type = obj.TYPE_CONST
+		p.To.Offset = params.Nzcv()
 	case ssa.OpARM64LoweredZero:
 		ptrReg := v.Args[0].Reg()
 		n := v.AuxInt
@@ -1319,7 +1340,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		}
 	case ssa.OpARM64LoweredPanicBoundsCR:
 		yIsReg = true
-		yVal := int(v.Args[0].Reg() - arm64.REG_R0)
+		yVal = int(v.Args[0].Reg() - arm64.REG_R0)
 		c := v.Aux.(ssa.PanicBoundsC).C
 		if c >= 0 && c <= abi.BoundsMaxConst {
 			xVal = int(c)
@@ -128,14 +128,29 @@ func Info(ctxt *obj.Link, fnsym *obj.LSym, infosym *obj.LSym, curfn obj.Func) (s
 	// already referenced by a dwarf var, attach an R_USETYPE relocation to
 	// the function symbol to insure that the type included in DWARF
 	// processing during linking.
+	// Do the same with R_USEIFACE relocations from the function symbol for the
+	// same reason.
+	// All these R_USETYPE relocations are only looked at if the function
+	// survives deadcode elimination in the linker.
 	typesyms := []*obj.LSym{}
 	for t := range fnsym.Func().Autot {
 		typesyms = append(typesyms, t)
 	}
+	for i := range fnsym.R {
+		if fnsym.R[i].Type == objabi.R_USEIFACE && !strings.HasPrefix(fnsym.R[i].Sym.Name, "go:itab.") {
+			// Types referenced through itab will be referenced from somewhere else
+			typesyms = append(typesyms, fnsym.R[i].Sym)
+		}
+	}
 	slices.SortFunc(typesyms, func(a, b *obj.LSym) int {
 		return strings.Compare(a.Name, b.Name)
 	})
+	var lastsym *obj.LSym
 	for _, sym := range typesyms {
+		if sym == lastsym {
+			continue
+		}
+		lastsym = sym
 		infosym.AddRel(ctxt, obj.Reloc{Type: objabi.R_USETYPE, Sym: sym})
 	}
 	fnsym.Func().Autot = nil
src/cmd/compile/internal/dwarfgen/linenum_test.go (new file, 105 lines)
@@ -0,0 +1,105 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dwarfgen
+
+import (
+	"debug/dwarf"
+	"internal/platform"
+	"internal/testenv"
+	"io"
+	"runtime"
+	"testing"
+)
+
+func TestIssue75249(t *testing.T) {
+	testenv.MustHaveGoRun(t)
+	t.Parallel()
+
+	if !platform.ExecutableHasDWARF(runtime.GOOS, runtime.GOARCH) {
+		t.Skipf("skipping on %s/%s: no DWARF symbol table in executables", runtime.GOOS, runtime.GOARCH)
+	}
+
+	code := `
+package main
+
+type Data struct {
+	Field1 int
+	Field2 *int
+	Field3 int
+	Field4 *int
+	Field5 int
+	Field6 *int
+	Field7 int
+	Field8 *int
+}
+
+//go:noinline
+func InitializeData(d *Data) {
+	d.Field1++ // line 16
+	d.Field2 = d.Field4
+	d.Field3++
+	d.Field4 = d.Field6
+	d.Field5++
+	d.Field6 = d.Field8
+	d.Field7++
+	d.Field8 = d.Field2 // line 23
+}
+
+func main() {
+	var data Data
+	InitializeData(&data)
+}
+`
+
+	_, f := gobuild(t, t.TempDir(), true, []testline{{line: code}})
+	defer f.Close()
+
+	dwarfData, err := f.DWARF()
+	if err != nil {
+		t.Fatal(err)
+	}
+	dwarfReader := dwarfData.Reader()
+
+	for {
+		entry, err := dwarfReader.Next()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if entry == nil {
+			break
+		}
+		if entry.Tag != dwarf.TagCompileUnit {
+			continue
+		}
+		name := entry.AttrField(dwarf.AttrName)
+		if name == nil || name.Class != dwarf.ClassString || name.Val != "main" {
+			continue
+		}
+		lr, err := dwarfData.LineReader(entry)
+		if err != nil {
+			t.Fatal(err)
+		}
+		stmts := map[int]bool{}
+		for {
+			var le dwarf.LineEntry
+			err := lr.Next(&le)
+			if err == io.EOF {
+				break
+			}
+			if err != nil {
+				t.Fatal(err)
+			}
+			if !le.IsStmt {
+				continue
+			}
+			stmts[le.Line] = true
+		}
+		for i := 16; i <= 23; i++ {
+			if !stmts[i] {
+				t.Errorf("missing statement at line %d", i)
+			}
+		}
+	}
+}
@@ -768,6 +768,17 @@ opSwitch:
 		if n.X.Op() == ir.OINDEX && isIndexingCoverageCounter(n.X) {
 			return false
 		}
+
+	case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
+		n := n.(*ir.SliceExpr)
+
+		// Ignore superfluous slicing.
+		if n.Low != nil && n.Low.Op() == ir.OLITERAL && ir.Int64Val(n.Low) == 0 {
+			v.budget++
+		}
+		if n.High != nil && n.High.Op() == ir.OLEN && n.High.(*ir.UnaryExpr).X == n.X {
+			v.budget += 2
+		}
 	}
 
 	v.budget--
@@ -185,7 +185,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		ssa.OpLOONG64MULD,
 		ssa.OpLOONG64DIVF,
 		ssa.OpLOONG64DIVD,
-		ssa.OpLOONG64MULV, ssa.OpLOONG64MULHV, ssa.OpLOONG64MULHVU,
+		ssa.OpLOONG64MULV, ssa.OpLOONG64MULHV, ssa.OpLOONG64MULHVU, ssa.OpLOONG64MULH, ssa.OpLOONG64MULHU,
 		ssa.OpLOONG64DIVV, ssa.OpLOONG64REMV, ssa.OpLOONG64DIVVU, ssa.OpLOONG64REMVU,
 		ssa.OpLOONG64FCOPYSGD:
 		p := s.Prog(v.Op.Asm())
@@ -276,6 +276,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpLOONG64ADDVconst,
+		ssa.OpLOONG64ADDV16const,
 		ssa.OpLOONG64SUBVconst,
 		ssa.OpLOONG64ANDconst,
 		ssa.OpLOONG64ORconst,
@@ -552,13 +553,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 
-	case ssa.OpLOONG64DUFFZERO:
-		// runtime.duffzero expects start address in R20
-		p := s.Prog(obj.ADUFFZERO)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = ir.Syms.Duffzero
-		p.To.Offset = v.AuxInt
 	case ssa.OpLOONG64LoweredZero:
 		ptrReg := v.Args[0].Reg()
 		n := v.AuxInt
@@ -652,49 +646,120 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 			zero8(s, ptrReg, off+n-8)
 		}
 
-	case ssa.OpLOONG64DUFFCOPY:
-		p := s.Prog(obj.ADUFFCOPY)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = ir.Syms.Duffcopy
-		p.To.Offset = v.AuxInt
 	case ssa.OpLOONG64LoweredMove:
-		// MOVx (Rarg1), Rtmp
-		// MOVx Rtmp, (Rarg0)
-		// ADDV $sz, Rarg1
-		// ADDV $sz, Rarg0
-		// BGEU Rarg2, Rarg0, -4(PC)
-		mov, sz := largestMove(v.AuxInt)
-		p := s.Prog(mov)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = loong64.REGTMP
-		p2 := s.Prog(mov)
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = loong64.REGTMP
-		p2.To.Type = obj.TYPE_MEM
-		p2.To.Reg = v.Args[0].Reg()
-		p3 := s.Prog(loong64.AADDVU)
-		p3.From.Type = obj.TYPE_CONST
-		p3.From.Offset = sz
-		p3.To.Type = obj.TYPE_REG
-		p3.To.Reg = v.Args[1].Reg()
-		p4 := s.Prog(loong64.AADDVU)
-		p4.From.Type = obj.TYPE_CONST
-		p4.From.Offset = sz
-		p4.To.Type = obj.TYPE_REG
-		p4.To.Reg = v.Args[0].Reg()
-		p5 := s.Prog(loong64.ABGEU)
-		p5.From.Type = obj.TYPE_REG
-		p5.From.Reg = v.Args[2].Reg()
-		p5.Reg = v.Args[1].Reg()
-		p5.To.Type = obj.TYPE_BRANCH
-		p5.To.SetTarget(p)
+		dstReg := v.Args[0].Reg()
+		srcReg := v.Args[1].Reg()
+		if dstReg == srcReg {
+			break
+		}
+		tmpReg := int16(loong64.REG_R20)
+		n := v.AuxInt
+		if n < 16 {
+			v.Fatalf("Move too small %d", n)
+		}
+
+		var off int64
+		for n >= 8 {
+			// MOVV	off(srcReg), tmpReg
+			// MOVV	tmpReg, off(dstReg)
+			move8(s, srcReg, dstReg, tmpReg, off)
+			off += 8
+			n -= 8
+		}
+
+		if n != 0 {
+			// MOVV	off+n-8(srcReg), tmpReg
+			// MOVV	tmpReg, off+n-8(srcReg)
+			move8(s, srcReg, dstReg, tmpReg, off+n-8)
+		}
+	case ssa.OpLOONG64LoweredMoveLoop:
+		dstReg := v.Args[0].Reg()
+		srcReg := v.Args[1].Reg()
+		if dstReg == srcReg {
+			break
+		}
+		countReg := int16(loong64.REG_R20)
+		tmpReg := int16(loong64.REG_R21)
+		var off int64
+		n := v.AuxInt
+		loopSize := int64(64)
+		if n < 3*loopSize {
+			// - a loop count of 0 won't work.
+			// - a loop count of 1 is useless.
+			// - a loop count of 2 is a code size ~tie
+			//     4 instructions to implement the loop
+			//     8 instructions in the loop body
+			//   vs
+			//     16 instructions in the straightline code
+			//   Might as well use straightline code.
+			v.Fatalf("ZeroLoop size too small %d", n)
+		}
+
+		// Put iteration count in a register.
+		//   MOVV	$n/loopSize, countReg
+		p := s.Prog(loong64.AMOVV)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = n / loopSize
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = countReg
+		cntInit := p
+
+		// Move loopSize bytes starting at srcReg to dstReg.
+		for range loopSize / 8 {
+			// MOVV	off(srcReg), tmpReg
+			// MOVV	tmpReg, off(dstReg)
+			move8(s, srcReg, dstReg, tmpReg, off)
+			off += 8
+		}
+
+		// Increment srcReg and destReg by loopSize.
+		//   ADDV	$loopSize, srcReg
+		p = s.Prog(loong64.AADDV)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = loopSize
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = srcReg
+		//   ADDV	$loopSize, dstReg
+		p = s.Prog(loong64.AADDV)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = loopSize
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = dstReg
+
+		// Decrement loop count.
+		//   SUBV	$1, countReg
+		p = s.Prog(loong64.ASUBV)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = 1
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = countReg
+
+		// Jump to loop header if we're not done yet.
+		//   BNE	countReg, loop header
+		p = s.Prog(loong64.ABNE)
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = countReg
+		p.To.Type = obj.TYPE_BRANCH
+		p.To.SetTarget(cntInit.Link)
+
+		// Multiples of the loop size are now done.
+		n %= loopSize
+
+		off = 0
+		// Copy any fractional portion.
+		for n >= 8 {
+			// MOVV	off(srcReg), tmpReg
+			// MOVV	tmpReg, off(dstReg)
+			move8(s, srcReg, dstReg, tmpReg, off)
+			off += 8
+			n -= 8
+		}
+
+		if n != 0 {
+			// MOVV	off+n-8(srcReg), tmpReg
+			// MOVV	tmpReg, off+n-8(srcReg)
+			move8(s, srcReg, dstReg, tmpReg, off+n-8)
+		}
 
 	case ssa.OpLOONG64CALLstatic, ssa.OpLOONG64CALLclosure, ssa.OpLOONG64CALLinter:
 		s.Call(v)
@@ -746,7 +811,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		}
 	case ssa.OpLOONG64LoweredPanicBoundsCR:
 		yIsReg = true
-		yVal := int(v.Args[0].Reg() - loong64.REG_R4)
+		yVal = int(v.Args[0].Reg() - loong64.REG_R4)
 		c := v.Aux.(ssa.PanicBoundsC).C
 		if c >= 0 && c <= abi.BoundsMaxConst {
 			xVal = int(c)
@@ -1225,6 +1290,24 @@ func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg in
 	return p
 }
 
+// move8 copies 8 bytes at src+off to dst+off.
+func move8(s *ssagen.State, src, dst, tmp int16, off int64) {
+	// MOVV	off(src), tmp
+	ld := s.Prog(loong64.AMOVV)
+	ld.From.Type = obj.TYPE_MEM
+	ld.From.Reg = src
+	ld.From.Offset = off
+	ld.To.Type = obj.TYPE_REG
+	ld.To.Reg = tmp
+	// MOVV	tmp, off(dst)
+	st := s.Prog(loong64.AMOVV)
+	st.From.Type = obj.TYPE_REG
+	st.From.Reg = tmp
+	st.To.Type = obj.TYPE_MEM
+	st.To.Reg = dst
+	st.To.Offset = off
+}
+
 // zero8 zeroes 8 bytes at reg+off.
 func zero8(s *ssagen.State, reg int16, off int64) {
 	// MOVV	ZR, off(reg)
@@ -5,41 +5,21 @@
 package mips
 
 import (
-	"cmd/compile/internal/base"
 	"cmd/compile/internal/objw"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/mips"
 )
 
-// TODO(mips): implement DUFFZERO
 func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
-	if cnt == 0 {
-		return p
+	if cnt%int64(types.PtrSize) != 0 {
+		panic("zeroed region not aligned")
 	}
-	if cnt < int64(4*types.PtrSize) {
-		for i := int64(0); i < cnt; i += int64(types.PtrSize) {
-			p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, base.Ctxt.Arch.FixedFrameSize+off+i)
-		}
-	} else {
-		//fmt.Printf("zerorange frame:%v, lo: %v, hi:%v \n", frame ,lo, hi)
-		// ADD $(FIXED_FRAME+frame+lo-4), SP, r1
-		// ADD $cnt, r1, r2
-		// loop:
-		// MOVW R0, (Widthptr)r1
-		// ADD $Widthptr, r1
-		// BNE r1, r2, loop
-		p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, base.Ctxt.Arch.FixedFrameSize+off-4, obj.TYPE_REG, mips.REGRT1, 0)
-		p.Reg = mips.REGSP
-		p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
-		p.Reg = mips.REGRT1
-		p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize))
-		p1 := p
-		p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0)
-		p = pp.Append(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
-		p.Reg = mips.REGRT2
-		p.To.SetTarget(p1)
+	for cnt != 0 {
+		p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, off)
+		cnt -= int64(types.PtrSize)
+		off += int64(types.PtrSize)
 	}
 
 	return p
@@ -551,7 +551,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		}
 	case ssa.OpMIPSLoweredPanicBoundsCR:
 		yIsReg = true
-		yVal := int(v.Args[0].Reg() - mips.REG_R1)
+		yVal = int(v.Args[0].Reg() - mips.REG_R1)
 		c := v.Aux.(ssa.PanicBoundsC).C
 		if c >= 0 && c <= abi.BoundsMaxConst {
 			xVal = int(c)
@@ -5,7 +5,6 @@
 package mips64
 
 import (
-	"cmd/compile/internal/ir"
 	"cmd/compile/internal/objw"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
@@ -13,37 +12,14 @@ import (
 )
 
 func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
-	if cnt == 0 {
-		return p
+	if cnt%int64(types.PtrSize) != 0 {
+		panic("zeroed region not aligned")
 	}
-	if cnt < int64(4*types.PtrSize) {
-		for i := int64(0); i < cnt; i += int64(types.PtrSize) {
-			p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+off+i)
-		}
-	} else if cnt <= int64(128*types.PtrSize) {
-		p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
-		p.Reg = mips.REGSP
-		p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = ir.Syms.Duffzero
-		p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize))
-	} else {
-		// ADDV $(8+frame+lo-8), SP, r1
-		// ADDV $cnt, r1, r2
-		// loop:
-		// MOVV R0, (Widthptr)r1
-		// ADDV $Widthptr, r1
-		// BNE r1, r2, loop
-		p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
-		p.Reg = mips.REGSP
-		p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
-		p.Reg = mips.REGRT1
-		p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize))
-		p1 := p
-		p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0)
-		p = pp.Append(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
-		p.Reg = mips.REGRT2
-		p.To.SetTarget(p1)
+	for cnt != 0 {
+		p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, off)
+		cnt -= int64(types.PtrSize)
+		off += int64(types.PtrSize)
 	}
 
 	return p
@@ -115,7 +115,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 			p.To.Type = obj.TYPE_REG
 			p.To.Reg = y
 		}
-	case ssa.OpMIPS64MOVVnop:
+	case ssa.OpMIPS64MOVVnop, ssa.OpMIPS64ZERO:
 		// nothing to do
 	case ssa.OpLoadReg:
 		if v.Type.IsFlags() {
@@ -301,16 +301,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
 		ssagen.AddAux(&p.To, v)
-	case ssa.OpMIPS64MOVBstorezero,
-		ssa.OpMIPS64MOVHstorezero,
-		ssa.OpMIPS64MOVWstorezero,
-		ssa.OpMIPS64MOVVstorezero:
-		p := s.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = mips.REGZERO
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		ssagen.AddAux(&p.To, v)
 	case ssa.OpMIPS64MOVBreg,
 		ssa.OpMIPS64MOVBUreg,
 		ssa.OpMIPS64MOVHreg,
@@ -542,7 +532,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		}
 	case ssa.OpMIPS64LoweredPanicBoundsCR:
 		yIsReg = true
-		yVal := int(v.Args[0].Reg() - mips.REG_R1)
+		yVal = int(v.Args[0].Reg() - mips.REG_R1)
 		c := v.Aux.(ssa.PanicBoundsC).C
 		if c >= 0 && c <= abi.BoundsMaxConst {
 			xVal = int(c)
@@ -1947,7 +1947,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		}
 	case ssa.OpPPC64LoweredPanicBoundsCR:
 		yIsReg = true
-		yVal := int(v.Args[0].Reg() - ppc64.REG_R3)
+		yVal = int(v.Args[0].Reg() - ppc64.REG_R3)
 		c := v.Aux.(ssa.PanicBoundsC).C
 		if c >= 0 && c <= abi.BoundsMaxConst {
 			xVal = int(c)
@@ -414,6 +414,10 @@ var kinds = []abi.Kind{
 	types.TUNSAFEPTR: abi.UnsafePointer,
 }
 
+func ABIKindOfType(t *types.Type) abi.Kind {
+	return kinds[t.Kind()]
+}
+
 var (
 	memhashvarlen  *obj.LSym
 	memequalvarlen *obj.LSym
@@ -512,8 +516,7 @@ func dcommontype(c rttype.Cursor, t *types.Type) {
 	c.Field("Align_").WriteUint8(uint8(t.Alignment()))
 	c.Field("FieldAlign_").WriteUint8(uint8(t.Alignment()))
 
-	kind := kinds[t.Kind()]
-	c.Field("Kind_").WriteUint8(uint8(kind))
+	c.Field("Kind_").WriteUint8(uint8(ABIKindOfType(t)))
 
 	c.Field("Equal").WritePtr(eqfunc)
 	c.Field("GCData").WritePtr(gcsym)
@@ -6,7 +6,6 @@ package riscv64
 
 import (
 	"cmd/compile/internal/base"
-	"cmd/compile/internal/ir"
 	"cmd/compile/internal/objw"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
@@ -14,46 +13,19 @@ import (
 )
 
 func zeroRange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
-	if cnt == 0 {
-		return p
+	if cnt%int64(types.PtrSize) != 0 {
+		panic("zeroed region not aligned")
 	}
 
 	// Adjust the frame to account for LR.
 	off += base.Ctxt.Arch.FixedFrameSize
 
-	if cnt < int64(4*types.PtrSize) {
-		for i := int64(0); i < cnt; i += int64(types.PtrSize) {
-			p = pp.Append(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_SP, off+i)
-		}
-		return p
-	}
-
-	if cnt <= int64(128*types.PtrSize) {
-		p = pp.Append(p, riscv.AADDI, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_X25, 0)
-		p.Reg = riscv.REG_SP
-		p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = ir.Syms.Duffzero
-		p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize))
-		return p
-	}
-
-	// Loop, zeroing pointer width bytes at a time.
-	// ADD	$(off), SP, T0
-	// ADD	$(cnt), T0, T1
-	// loop:
-	// 	MOV	ZERO, (T0)
-	// 	ADD	$Widthptr, T0
-	// 	BNE	T0, T1, loop
-	p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_T0, 0)
-	p.Reg = riscv.REG_SP
-	p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, riscv.REG_T1, 0)
-	p.Reg = riscv.REG_T0
-	p = pp.Append(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_T0, 0)
-	loop := p
-	p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, riscv.REG_T0, 0)
-	p = pp.Append(p, riscv.ABNE, obj.TYPE_REG, riscv.REG_T0, 0, obj.TYPE_BRANCH, 0, 0)
-	p.Reg = riscv.REG_T1
-	p.To.SetTarget(loop)
+	for cnt != 0 {
+		p = pp.Append(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_SP, off)
+		cnt -= int64(types.PtrSize)
+		off += int64(types.PtrSize)
+	}
+
 	return p
 }
@@ -181,6 +181,8 @@ func largestMove(alignment int64) (obj.As, int64) {
 	}
 }
 
+var fracMovOps = []obj.As{riscv.AMOVB, riscv.AMOVH, riscv.AMOVW, riscv.AMOV}
+
 // ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
 // RISC-V has no flags, so this is a no-op.
 func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {}
@@ -544,7 +546,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		}
 	case ssa.OpRISCV64LoweredPanicBoundsCR:
 		yIsReg = true
-		yVal := int(v.Args[0].Reg() - riscv.REG_X5)
+		yVal = int(v.Args[0].Reg() - riscv.REG_X5)
 		c := v.Aux.(ssa.PanicBoundsC).C
 		if c >= 0 && c <= abi.BoundsMaxConst {
 			xVal = int(c)
@@ -738,70 +740,181 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.RegTo2 = riscv.REG_ZERO
 
 	case ssa.OpRISCV64LoweredZero:
-		mov, sz := largestMove(v.AuxInt)
-
-		// mov	ZERO, (Rarg0)
-		// ADD	$sz, Rarg0
-		// BGEU	Rarg1, Rarg0, -2(PC)
-		p := s.Prog(mov)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = riscv.REG_ZERO
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-
-		p2 := s.Prog(riscv.AADD)
-		p2.From.Type = obj.TYPE_CONST
-		p2.From.Offset = sz
-		p2.To.Type = obj.TYPE_REG
-		p2.To.Reg = v.Args[0].Reg()
-
-		p3 := s.Prog(riscv.ABGEU)
-		p3.To.Type = obj.TYPE_BRANCH
-		p3.Reg = v.Args[0].Reg()
-		p3.From.Type = obj.TYPE_REG
-		p3.From.Reg = v.Args[1].Reg()
-		p3.To.SetTarget(p)
+		ptr := v.Args[0].Reg()
+		sc := v.AuxValAndOff()
+		n := sc.Val64()
+		mov, sz := largestMove(sc.Off64())
+
+		// mov	ZERO, (offset)(Rarg0)
+		var off int64
+		for n >= sz {
+			zeroOp(s, mov, ptr, off)
+			off += sz
+			n -= sz
+		}
+
+		for i := len(fracMovOps) - 1; i >= 0; i-- {
+			tsz := int64(1 << i)
+			if n < tsz {
+				continue
+			}
+			zeroOp(s, fracMovOps[i], ptr, off)
+			off += tsz
+			n -= tsz
+		}
+
+	case ssa.OpRISCV64LoweredZeroLoop:
+		ptr := v.Args[0].Reg()
+		sc := v.AuxValAndOff()
+		n := sc.Val64()
+		mov, sz := largestMove(sc.Off64())
+		chunk := 8 * sz
+
+		if n <= 3*chunk {
+			v.Fatalf("ZeroLoop too small:%d, expect:%d", n, 3*chunk)
+		}
+
+		tmp := v.RegTmp()
+
+		p := s.Prog(riscv.AADD)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = n - n%chunk
+		p.Reg = ptr
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = tmp
+
+		for i := int64(0); i < 8; i++ {
+			zeroOp(s, mov, ptr, sz*i)
+		}
+
+		p2 := s.Prog(riscv.AADD)
+		p2.From.Type = obj.TYPE_CONST
+		p2.From.Offset = chunk
+		p2.To.Type = obj.TYPE_REG
+		p2.To.Reg = ptr
+
+		p3 := s.Prog(riscv.ABNE)
+		p3.From.Type = obj.TYPE_REG
+		p3.From.Reg = tmp
+		p3.Reg = ptr
+		p3.To.Type = obj.TYPE_BRANCH
+		p3.To.SetTarget(p.Link)
+
+		n %= chunk
+
+		// mov	ZERO, (offset)(Rarg0)
+		var off int64
+		for n >= sz {
+			zeroOp(s, mov, ptr, off)
+			off += sz
+			n -= sz
+		}
+
+		for i := len(fracMovOps) - 1; i >= 0; i-- {
+			tsz := int64(1 << i)
+			if n < tsz {
+				continue
+			}
+			zeroOp(s, fracMovOps[i], ptr, off)
+			off += tsz
+			n -= tsz
+		}
 
 	case ssa.OpRISCV64LoweredMove:
-		mov, sz := largestMove(v.AuxInt)
-
-		// mov	(Rarg1), T2
-		// mov	T2, (Rarg0)
-		// ADD	$sz, Rarg0
-		// ADD	$sz, Rarg1
-		// BGEU	Rarg2, Rarg0, -4(PC)
-		p := s.Prog(mov)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = riscv.REG_T2
-
-		p2 := s.Prog(mov)
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = riscv.REG_T2
-		p2.To.Type = obj.TYPE_MEM
-		p2.To.Reg = v.Args[0].Reg()
-
-		p3 := s.Prog(riscv.AADD)
-		p3.From.Type = obj.TYPE_CONST
-		p3.From.Offset = sz
-		p3.To.Type = obj.TYPE_REG
-		p3.To.Reg = v.Args[0].Reg()
-
-		p4 := s.Prog(riscv.AADD)
-		p4.From.Type = obj.TYPE_CONST
-		p4.From.Offset = sz
-		p4.To.Type = obj.TYPE_REG
-		p4.To.Reg = v.Args[1].Reg()
-
-		p5 := s.Prog(riscv.ABGEU)
-		p5.To.Type = obj.TYPE_BRANCH
-		p5.Reg = v.Args[1].Reg()
-		p5.From.Type = obj.TYPE_REG
-		p5.From.Reg = v.Args[2].Reg()
-		p5.To.SetTarget(p)
+		dst := v.Args[0].Reg()
+		src := v.Args[1].Reg()
+		if dst == src {
+			break
+		}
+
+		sa := v.AuxValAndOff()
+		n := sa.Val64()
+		mov, sz := largestMove(sa.Off64())
+
+		var off int64
+		tmp := int16(riscv.REG_X5)
+		for n >= sz {
+			moveOp(s, mov, dst, src, tmp, off)
+			off += sz
+			n -= sz
+		}
+
+		for i := len(fracMovOps) - 1; i >= 0; i-- {
+			tsz := int64(1 << i)
+			if n < tsz {
+				continue
+			}
+			moveOp(s, fracMovOps[i], dst, src, tmp, off)
+			off += tsz
+			n -= tsz
+		}
+
+	case ssa.OpRISCV64LoweredMoveLoop:
+		dst := v.Args[0].Reg()
+		src := v.Args[1].Reg()
+		if dst == src {
+			break
+		}
+
+		sc := v.AuxValAndOff()
+		n := sc.Val64()
+		mov, sz := largestMove(sc.Off64())
+		chunk := 8 * sz
+
+		if n <= 3*chunk {
+			v.Fatalf("MoveLoop too small:%d, expect:%d", n, 3*chunk)
+		}
+		tmp := int16(riscv.REG_X5)
+
+		p := s.Prog(riscv.AADD)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = n - n%chunk
+		p.Reg = src
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = riscv.REG_X6
+
+		for i := int64(0); i < 8; i++ {
+			moveOp(s, mov, dst, src, tmp, sz*i)
+		}
+
+		p1 := s.Prog(riscv.AADD)
+		p1.From.Type = obj.TYPE_CONST
+		p1.From.Offset = chunk
+		p1.To.Type = obj.TYPE_REG
+		p1.To.Reg = src
+
+		p2 := s.Prog(riscv.AADD)
+		p2.From.Type = obj.TYPE_CONST
+		p2.From.Offset = chunk
+		p2.To.Type = obj.TYPE_REG
+		p2.To.Reg = dst
+
+		p3 := s.Prog(riscv.ABNE)
+		p3.From.Type = obj.TYPE_REG
+		p3.From.Reg = riscv.REG_X6
+		p3.Reg = src
+		p3.To.Type = obj.TYPE_BRANCH
+		p3.To.SetTarget(p.Link)
+
+		n %= chunk
+
+		var off int64
+		for n >= sz {
+			moveOp(s, mov, dst, src, tmp, off)
+			off += sz
+			n -= sz
+		}
+
+		for i := len(fracMovOps) - 1; i >= 0; i-- {
+			tsz := int64(1 << i)
+			if n < tsz {
+				continue
+			}
+			moveOp(s, fracMovOps[i], dst, src, tmp, off)
+			off += tsz
+			n -= tsz
+		}
 
 	case ssa.OpRISCV64LoweredNilCheck:
 		// Issue a load which will fault if arg is nil.
@@ -836,20 +949,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 
-	case ssa.OpRISCV64DUFFZERO:
-		p := s.Prog(obj.ADUFFZERO)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = ir.Syms.Duffzero
-		p.To.Offset = v.AuxInt
-
-	case ssa.OpRISCV64DUFFCOPY:
-		p := s.Prog(obj.ADUFFCOPY)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = ir.Syms.Duffcopy
-		p.To.Offset = v.AuxInt
-
 	case ssa.OpRISCV64LoweredPubBarrier:
 		// FENCE
 		s.Prog(v.Op.Asm())
@@ -955,3 +1054,31 @@ func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg in
 	p.Pos = p.Pos.WithNotStmt()
 	return p
 }
+
+func zeroOp(s *ssagen.State, mov obj.As, reg int16, off int64) {
+	p := s.Prog(mov)
+	p.From.Type = obj.TYPE_REG
+	p.From.Reg = riscv.REG_ZERO
+	p.To.Type = obj.TYPE_MEM
+	p.To.Reg = reg
+	p.To.Offset = off
+	return
+}
+
+func moveOp(s *ssagen.State, mov obj.As, dst int16, src int16, tmp int16, off int64) {
+	p := s.Prog(mov)
+	p.From.Type = obj.TYPE_MEM
+	p.From.Reg = src
+	p.From.Offset = off
+	p.To.Type = obj.TYPE_REG
+	p.To.Reg = tmp
+
+	p1 := s.Prog(mov)
+	p1.From.Type = obj.TYPE_REG
+	p1.From.Reg = tmp
+	p1.To.Type = obj.TYPE_MEM
+	p1.To.Reg = dst
+	p1.To.Offset = off
+
+	return
+}
@@ -608,7 +608,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		}
 	case ssa.OpS390XLoweredPanicBoundsCR:
 		yIsReg = true
-		yVal := int(v.Args[0].Reg() - s390x.REG_R0)
+		yVal = int(v.Args[0].Reg() - s390x.REG_R0)
 		c := v.Aux.(ssa.PanicBoundsC).C
 		if c >= 0 && c <= abi.BoundsMaxConst {
 			xVal = int(c)
@@ -156,12 +156,14 @@ func init() {
 		gp11           = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
 		gp11sp         = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}}
 		gp1flags       = regInfo{inputs: []regMask{gpg}}
+		gp1flagsflags  = regInfo{inputs: []regMask{gpg}}
 		gp1flags1      = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
 		gp11flags      = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp, 0}}
 		gp21           = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}}
 		gp21nog        = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}}
 		gp21flags      = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp, 0}}
 		gp2flags       = regInfo{inputs: []regMask{gpg, gpg}}
+		gp2flagsflags  = regInfo{inputs: []regMask{gpg, gpg}}
 		gp2flags1      = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}}
 		gp2flags1flags = regInfo{inputs: []regMask{gp, gp, 0}, outputs: []regMask{gp, 0}}
 		gp2load        = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
@@ -508,6 +510,22 @@ func init() {
 		{name: "CSNEG", argLength: 3, reg: gp2flags1, asm: "CSNEG", aux: "CCop"}, // auxint(flags) ? arg0 : -arg1
 		{name: "CSETM", argLength: 1, reg: readflags, asm: "CSETM", aux: "CCop"}, // auxint(flags) ? -1 : 0
 
+		// conditional comparison instructions; auxint is
+		// combination of Cond, Nzcv and optional ConstValue
+		// Behavior:
+		//   If the condition 'Cond' evaluates to true against current flags,
+		//   flags are set to the result of the comparison operation.
+		//   Otherwise, flags are set to the fallback value 'Nzcv'.
+		{name: "CCMP", argLength: 3, reg: gp2flagsflags, asm: "CCMP", aux: "ARM64ConditionalParams", typ: "Flag"},      // If Cond then flags = CMP arg0 arg1 else flags = Nzcv
+		{name: "CCMN", argLength: 3, reg: gp2flagsflags, asm: "CCMN", aux: "ARM64ConditionalParams", typ: "Flag"},      // If Cond then flags = CMN arg0 arg1 else flags = Nzcv
+		{name: "CCMPconst", argLength: 2, reg: gp1flagsflags, asm: "CCMP", aux: "ARM64ConditionalParams", typ: "Flag"}, // If Cond then flags = CMPconst [ConstValue] arg0 else flags = Nzcv
+		{name: "CCMNconst", argLength: 2, reg: gp1flagsflags, asm: "CCMN", aux: "ARM64ConditionalParams", typ: "Flag"}, // If Cond then flags = CMNconst [ConstValue] arg0 else flags = Nzcv
+
+		{name: "CCMPW", argLength: 3, reg: gp2flagsflags, asm: "CCMPW", aux: "ARM64ConditionalParams", typ: "Flag"},      // If Cond then flags = CMPW arg0 arg1 else flags = Nzcv
+		{name: "CCMNW", argLength: 3, reg: gp2flagsflags, asm: "CCMNW", aux: "ARM64ConditionalParams", typ: "Flag"},      // If Cond then flags = CMNW arg0 arg1 else flags = Nzcv
+		{name: "CCMPWconst", argLength: 2, reg: gp1flagsflags, asm: "CCMPW", aux: "ARM64ConditionalParams", typ: "Flag"}, // If Cond then flags = CCMPWconst [ConstValue] arg0 else flags = Nzcv
+		{name: "CCMNWconst", argLength: 2, reg: gp1flagsflags, asm: "CCMNW", aux: "ARM64ConditionalParams", typ: "Flag"}, // If Cond then flags = CCMNWconst [ConstValue] arg0 else flags = Nzcv
+
 		// function calls
 		{name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true},                 // call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
 		{name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true},   // tail call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
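For orientation, a sketch (not from the commit) of the kind of Go source these conditional-compare ops target: comparisons chained with && or ||, which the new lowering can turn into a CMP followed by a flag-conditional CCMP and a conditional set instead of short-circuit branches. Whether any particular chain is fused is a compiler decision.

    // inRange reports whether v lies in [lo, hi]. The two comparisons
    // joined by && are the shape that CMP+CCMP can evaluate branch-free.
    func inRange(v, lo, hi int64) bool {
        return v >= lo && v <= hi
    }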
@@ -17,8 +17,8 @@
 
 (Hmul64 ...) => (MULHV ...)
 (Hmul64u ...) => (MULHVU ...)
-(Hmul32 x y) => (SRAVconst (MULV (SignExt32to64 x) (SignExt32to64 y)) [32])
-(Hmul32u x y) => (SRLVconst (MULV (ZeroExt32to64 x) (ZeroExt32to64 y)) [32])
+(Hmul32 ...) => (MULH ...)
+(Hmul32u ...) => (MULHU ...)
 
 (Div64 x y) => (DIVV x y)
 (Div64u ...) => (DIVVU ...)
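The rewritten rules above lower the generic Hmul32/Hmul32u ops directly to the new MULH/MULHU instructions instead of widening to a 64-bit multiply and shifting. A sketch (illustrative, not from the commit) of Go source that produces a 32-bit high multiply, the pattern the compiler recognizes as Hmul32:

    // hi32 returns the upper 32 bits of the 64-bit product of x and y.
    func hi32(x, y int32) int32 {
        return int32((int64(x) * int64(y)) >> 32)
    }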
@@ -419,34 +419,8 @@
 (MOVVstore [8] dst (MOVVload [8] src mem)
 	(MOVVstore dst (MOVVload src mem) mem))
 
-// strip off fractional word move
-(Move [s] dst src mem) && s%8 != 0 && s > 16 =>
-	(Move [s%8]
-		(OffPtr <dst.Type> dst [s-s%8])
-		(OffPtr <src.Type> src [s-s%8])
-		(Move [s-s%8] dst src mem))
-
-// medium move uses a duff device
-(Move [s] dst src mem)
-	&& s%8 == 0 && s > 16 && s <= 8*128
-	&& logLargeCopy(v, s) =>
-	(DUFFCOPY [16 * (128 - s/8)] dst src mem)
-// 16 and 128 are magic constants. 16 is the number of bytes to encode:
-//	MOVV	(R20), R30
-//	ADDV	$8, R20
-//	MOVV	R30, (R21)
-//	ADDV	$8, R21
-// and 128 is the number of such blocks. See runtime/duff_loong64.s:duffcopy.
-
-// large move uses a loop
-(Move [s] dst src mem)
-	&& s%8 == 0 && s > 1024 && logLargeCopy(v, s) =>
-	(LoweredMove
-		dst
-		src
-		(ADDVconst <src.Type> src [s-8])
-		mem)
+(Move [s] dst src mem) && s > 16 && s < 192 && logLargeCopy(v, s) => (LoweredMove [s] dst src mem)
+(Move [s] dst src mem) && s >= 192 && logLargeCopy(v, s) => (LoweredMoveLoop [s] dst src mem)
 
 // float <=> int register moves, with no conversion.
 // These come up when compiling math.{Float64bits, Float64frombits, Float32bits, Float32frombits}.
@@ -455,6 +429,10 @@
 (MOVWUload [off] {sym} ptr (MOVFstore [off] {sym} ptr val _)) => (ZeroExt32to64 (MOVWfpgp <typ.Float32> val))
 (MOVFload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) => (MOVWgpfp val)
 
+// If the memory load and store operations use the same ptr, they are combined into a direct move operation between registers.
+(MOV(V|W|H|B)load [off] {sym} ptr (MOV(V|W|H|B)store [off] {sym} ptr x _)) => (MOV(V|W|H|B)reg x)
+(MOV(W|H|B)Uload [off] {sym} ptr (MOV(W|H|B)store [off] {sym} ptr x _)) => (MOV(W|H|B)Ureg x)
+
 // Similarly for stores, if we see a store after FPR <=> GPR move, then redirect store to use the other register set.
 (MOVVstore [off] {sym} ptr (MOVVfpgp val) mem) => (MOVDstore [off] {sym} ptr val mem)
 (MOVDstore [off] {sym} ptr (MOVVgpfp val) mem) => (MOVVstore [off] {sym} ptr val mem)
@@ -752,6 +730,9 @@
(ADDV x0 x1:(SLLVconst [c] y)) && x1.Uses == 1 && c > 0 && c <= 4 => (ADDshiftLLV x0 y [c])

// fold constant in ADDshift op
(ADDshiftLLV x (MOVVconst [c]) [d]) && is12Bit(c<<d) => (ADDVconst x [c<<d])

// div by constant
(DIVVU x (MOVVconst [1])) => x
(DIVVU x (MOVVconst [c])) && isPowerOfTwo(c) => (SRLVconst [log64(c)] x)
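Two illustrative spot checks of the folds above (values invented for the example):

package main

import (
    "fmt"
    "math/bits"
)

func main() {
    // ADDshiftLLV x, (MOVVconst [3]), shift 2 computes x + (3<<2);
    // 12 fits in 12 bits, so the rule folds it to ADDVconst x [12].
    fmt.Println(3 << 2) // 12
    // DIVVU x by 8, a power of two, becomes SRLVconst [log64(8)] x.
    fmt.Println(bits.TrailingZeros64(8)) // 3
}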
@@ -816,6 +797,7 @@
(SUBVconst [c] (SUBVconst [d] x)) && is32Bit(-c-d) => (ADDVconst [-c-d] x)
(SUBVconst [c] (ADDVconst [d] x)) && is32Bit(-c+d) => (ADDVconst [-c+d] x)
(SUBV (MOVVconst [c]) (NEGV (SUBVconst [d] x))) => (ADDVconst [c-d] x)
(ADDVconst [c] x) && is32Bit(c) && c&0xffff == 0 && c != 0 => (ADDV16const [c] x)
(SLLVconst [c] (MOVVconst [d])) => (MOVVconst [d<<uint64(c)])
(SRLVconst [c] (MOVVconst [d])) => (MOVVconst [int64(uint64(d)>>uint64(c))])
(SRAVconst [c] (MOVVconst [d])) => (MOVVconst [d>>uint64(c)])
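For intuition (an illustrative check, not compiler code): a constant takes the new ADDV16const path exactly when it is a nonzero 32-bit value whose low 16 bits are clear:

package main

import "fmt"

func main() {
    for _, c := range []int64{0x30000, 0x12345, 1 << 16} {
        ok := int64(int32(c)) == c && c&0xffff == 0 && c != 0 // is32Bit && nonzero multiple of 65536
        fmt.Printf("%#x -> ADDV16const? %v\n", c, ok)
    }
}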
@@ -970,3 +952,12 @@
    && isInlinableMemmove(dst, src, sz, config)
    && clobber(call)
    => (Move [sz] dst src mem)

// fold readonly sym load
(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read8(sym, int64(off)))])
(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVWUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVVload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(int8(read8(sym, int64(off))))])
(MOVHload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
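For a sense of what these rules buy (illustrative Go; the folding happens inside the compiler): a load at a constant offset into read-only data, such as indexing a string literal, can be replaced by the constant itself:

package main

import "fmt"

func main() {
    // "hello" lives in a read-only symbol, so a byte load like this is
    // what the MOVBUload rule above folds to MOVVconst [int64('e')].
    fmt.Println("hello"[1]) // 101
}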
@@ -189,14 +189,17 @@ func init() {
{name: "VPCNT16", argLength: 1, reg: fp11, asm: "VPCNTH"}, // count set bits for each 16-bit unit and store the result in each 16-bit unit

// binary ops
{name: "ADDV", argLength: 2, reg: gp21, asm: "ADDVU", commutative: true},      // arg0 + arg1
{name: "ADDVconst", argLength: 1, reg: gp11sp, asm: "ADDVU", aux: "Int64"},    // arg0 + auxInt. auxInt is 32-bit, also in other *const ops.
{name: "SUBV", argLength: 2, reg: gp21, asm: "SUBVU"},                         // arg0 - arg1
{name: "SUBVconst", argLength: 1, reg: gp11, asm: "SUBVU", aux: "Int64"},      // arg0 - auxInt
{name: "ADDV", argLength: 2, reg: gp21, asm: "ADDVU", commutative: true},      // arg0 + arg1
{name: "ADDVconst", argLength: 1, reg: gp11sp, asm: "ADDVU", aux: "Int64"},    // arg0 + auxInt. auxInt is 32-bit, also in other *const ops.
{name: "ADDV16const", argLength: 1, reg: gp11sp, asm: "ADDV16", aux: "Int64"}, // arg0 + auxInt. auxInt is signed 32-bit and is a multiple of 65536, also in other *const ops.
{name: "SUBV", argLength: 2, reg: gp21, asm: "SUBVU"},                         // arg0 - arg1
{name: "SUBVconst", argLength: 1, reg: gp11, asm: "SUBVU", aux: "Int64"},      // arg0 - auxInt

{name: "MULV", argLength: 2, reg: gp21, asm: "MULV", commutative: true, typ: "Int64"},      // arg0 * arg1
{name: "MULHV", argLength: 2, reg: gp21, asm: "MULHV", commutative: true, typ: "Int64"},    // (arg0 * arg1) >> 64, signed
{name: "MULHVU", argLength: 2, reg: gp21, asm: "MULHVU", commutative: true, typ: "UInt64"}, // (arg0 * arg1) >> 64, unsigned
{name: "MULH", argLength: 2, reg: gp21, asm: "MULH", commutative: true, typ: "Int32"},      // (arg0 * arg1) >> 32, signed
{name: "MULHU", argLength: 2, reg: gp21, asm: "MULHU", commutative: true, typ: "UInt32"},   // (arg0 * arg1) >> 32, unsigned
{name: "DIVV", argLength: 2, reg: gp21, asm: "DIVV", typ: "Int64"},                         // arg0 / arg1, signed
{name: "DIVVU", argLength: 2, reg: gp21, asm: "DIVVU", typ: "UInt64"},                      // arg0 / arg1, unsigned
{name: "REMV", argLength: 2, reg: gp21, asm: "REMV", typ: "Int64"},                         // arg0 % arg1, signed
@@ -358,24 +361,6 @@ func init() {
{name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{gpsp, buildReg("R29"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true},                          // call fn by pointer. arg0=codeptr, last arg=mem, auxint=argsize, returns mem

// duffzero
// arg0 = address of memory to zero
// arg1 = mem
// auxint = offset into duffzero code to start executing
// returns mem
// R20 aka loong64.REGRT1 changed as side effect
{
    name:      "DUFFZERO",
    aux:       "Int64",
    argLength: 2,
    reg: regInfo{
        inputs:   []regMask{buildReg("R20")},
        clobbers: buildReg("R20 R1"),
    },
    typ:            "Mem",
    faultOnNilArg0: true,
},

// medium zeroing
// arg0 = address of memory to zero
// arg1 = mem
@@ -391,25 +376,6 @@ func init() {
    faultOnNilArg0: true,
},

// duffcopy
// arg0 = address of dst memory (in R21, changed as side effect)
// arg1 = address of src memory (in R20, changed as side effect)
// arg2 = mem
// auxint = offset into duffcopy code to start executing
// returns mem
{
    name:      "DUFFCOPY",
    aux:       "Int64",
    argLength: 3,
    reg: regInfo{
        inputs:   []regMask{buildReg("R21"), buildReg("R20")},
        clobbers: buildReg("R20 R21 R1"),
    },
    typ:            "Mem",
    faultOnNilArg0: true,
    faultOnNilArg1: true,
},

// large zeroing
// arg0 = address of memory to zero
// arg1 = mem
@@ -427,27 +393,40 @@ func init() {
    needIntTemp: true,
},

// large or unaligned move
// arg0 = address of dst memory (in R21, changed as side effect)
// arg1 = address of src memory (in R20, changed as side effect)
// arg2 = address of the last element of src
// arg3 = mem
// auxint = alignment
// medium copying
// arg0 = address of dst memory
// arg1 = address of src memory
// arg2 = mem
// auxint = number of bytes to copy
// returns mem
//	MOVx	(R20), Rtmp
//	MOVx	Rtmp, (R21)
//	ADDV	$sz, R20
//	ADDV	$sz, R21
//	BGEU	Rarg2, R20, -4(PC)
{
    name:      "LoweredMove",
    aux:       "Int64",
    argLength: 4,
    argLength: 3,
    reg: regInfo{
        inputs:   []regMask{buildReg("R21"), buildReg("R20"), gp},
        clobbers: buildReg("R20 R21"),
        inputs:   []regMask{gp &^ buildReg("R20"), gp &^ buildReg("R20")},
        clobbers: buildReg("R20"),
    },
    faultOnNilArg0: true,
    faultOnNilArg1: true,
},

// large copying
// arg0 = address of dst memory
// arg1 = address of src memory
// arg2 = mem
// auxint = number of bytes to copy
// returns mem
{
    name:      "LoweredMoveLoop",
    aux:       "Int64",
    argLength: 3,
    reg: regInfo{
        inputs:       []regMask{gp &^ buildReg("R20 R21"), gp &^ buildReg("R20 R21")},
        clobbers:     buildReg("R20 R21"),
        clobbersArg0: true,
        clobbersArg1: true,
    },
    typ:            "Mem",
    faultOnNilArg0: true,
    faultOnNilArg1: true,
},
@@ -544,14 +544,6 @@
    && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVFstore [off1+int32(off2)] {sym} ptr val mem)
(MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
    && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVDstore [off1+int32(off2)] {sym} ptr val mem)
(MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
    && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
    && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
    && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
    && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVVstorezero [off1+int32(off2)] {sym} ptr mem)

(MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
    && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
@@ -614,28 +606,6 @@
    && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
    && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
    (MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
(MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
    && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
    && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
    (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
    && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
    && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
    (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
    && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
    && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
    (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
    && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
    && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
    (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)

// store zero
(MOVBstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
(MOVVstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVVstorezero [off] {sym} ptr mem)

// don't extend after proper load
(MOVBreg x:(MOVBload _ _)) => (MOVVreg x)
@@ -29,7 +29,7 @@ import "strings"
// so that regmask stays within int64
// Be careful when hand coding regmasks.
var regNamesMIPS64 = []string{
    "R0", // constant 0
    "ZERO", // constant 0
    "R1",
    "R2",
    "R3",
@@ -137,16 +137,17 @@ func init() {
    hi         = buildReg("HI")
    callerSave = gp | fp | lo | hi | buildReg("g") // runtime.setg (and anything calling it) may clobber g
    first16    = buildReg("R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16")
    rz         = buildReg("ZERO")
)
// Common regInfo
var (
    gp01    = regInfo{inputs: nil, outputs: []regMask{gp}}
    gp11    = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
    gp11sp  = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}}
    gp21    = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}}
    gp21    = regInfo{inputs: []regMask{gpg, gpg | rz}, outputs: []regMask{gp}}
    gp2hilo = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{hi, lo}}
    gpload  = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}
    gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}}
    gpstore = regInfo{inputs: []regMask{gpspsbg, gpg | rz}}
    gpstore0 = regInfo{inputs: []regMask{gpspsbg}}
    gpxchg  = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
    gpcas   = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}}
@@ -242,10 +243,7 @@ func init() {
{name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
{name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.

{name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem.
{name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem.
{name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of zero to arg0 + auxInt + aux. arg1=mem.
{name: "MOVVstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of zero to arg0 + auxInt + aux. arg1=mem.
{name: "ZERO", zeroWidth: true, fixedReg: true},

// moves (no conversion)
{name: "MOVWfpgp", argLength: 1, reg: fpgp, asm: "MOVW"}, // move float32 to int32 (no conversion). MIPS64 will perform sign-extend to 64-bit by default
@@ -1,7 +1,6 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

func osArchInit() {}
// use zero register
(MOVVconst [0]) => (ZERO)
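Illustrative only: together with the widened gp21/gpstore masks above, rewriting (MOVVconst [0]) to the fixed ZERO register means zero stores no longer materialize a constant. A sketch of the effect:

package main

// With ZERO allowed as a store operand, a function like this can be
// selected as a single MOVV ZERO, (R4) on mips64 rather than first
// loading 0 into a scratch register.
func zero(p *int64) { *p = 0 }

func main() { x := int64(7); zero(&x); _ = x }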
@@ -373,36 +373,14 @@
(MOVHstore [4] ptr (MOVDconst [0])
    (MOVHstore [2] ptr (MOVDconst [0])
        (MOVHstore ptr (MOVDconst [0]) mem)))
(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
    (MOVWstore [8] ptr (MOVDconst [0])
        (MOVWstore [4] ptr (MOVDconst [0])
            (MOVWstore ptr (MOVDconst [0]) mem)))
(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 =>
    (MOVDstore [8] ptr (MOVDconst [0])
        (MOVDstore ptr (MOVDconst [0]) mem))
(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 =>
    (MOVDstore [16] ptr (MOVDconst [0])
        (MOVDstore [8] ptr (MOVDconst [0])
            (MOVDstore ptr (MOVDconst [0]) mem)))
(Zero [32] {t} ptr mem) && t.Alignment()%8 == 0 =>
    (MOVDstore [24] ptr (MOVDconst [0])
        (MOVDstore [16] ptr (MOVDconst [0])
            (MOVDstore [8] ptr (MOVDconst [0])
                (MOVDstore ptr (MOVDconst [0]) mem))))

// Medium 8-aligned zeroing uses a Duff's device
// 8 and 128 are magic constants, see runtime/mkduff.go
(Zero [s] {t} ptr mem)
    && s%8 == 0 && s <= 8*128
    && t.Alignment()%8 == 0 =>
    (DUFFZERO [8 * (128 - s/8)] ptr mem)
// Unroll zeroing in medium size (at most 192 bytes i.e. 3 cachelines)
(Zero [s] {t} ptr mem) && s <= 24*moveSize(t.Alignment(), config) =>
    (LoweredZero [makeValAndOff(int32(s),int32(t.Alignment()))] ptr mem)

// Generic zeroing uses a loop
(Zero [s] {t} ptr mem) =>
    (LoweredZero [t.Alignment()]
        ptr
        (ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.Alignment(), config)]))
        mem)
(Zero [s] {t} ptr mem) && s > 24*moveSize(t.Alignment(), config) =>
    (LoweredZeroLoop [makeValAndOff(int32(s),int32(t.Alignment()))] ptr mem)

// Checks
(IsNonNil ...) => (SNEZ ...)
@@ -464,37 +442,16 @@
(MOVHstore [4] dst (MOVHload [4] src mem)
    (MOVHstore [2] dst (MOVHload [2] src mem)
        (MOVHstore dst (MOVHload src mem) mem)))
(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
    (MOVWstore [8] dst (MOVWload [8] src mem)
        (MOVWstore [4] dst (MOVWload [4] src mem)
            (MOVWstore dst (MOVWload src mem) mem)))
(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 =>
    (MOVDstore [8] dst (MOVDload [8] src mem)
        (MOVDstore dst (MOVDload src mem) mem))
(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 =>
    (MOVDstore [16] dst (MOVDload [16] src mem)
        (MOVDstore [8] dst (MOVDload [8] src mem)
            (MOVDstore dst (MOVDload src mem) mem)))
(Move [32] {t} dst src mem) && t.Alignment()%8 == 0 =>
    (MOVDstore [24] dst (MOVDload [24] src mem)
        (MOVDstore [16] dst (MOVDload [16] src mem)
            (MOVDstore [8] dst (MOVDload [8] src mem)
                (MOVDstore dst (MOVDload src mem) mem))))

// Medium 8-aligned move uses a Duff's device
// 16 and 128 are magic constants, see runtime/mkduff.go
(Move [s] {t} dst src mem)
    && s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0
// Generic move
(Move [s] {t} dst src mem) && s > 0 && s <= 3*8*moveSize(t.Alignment(), config)
    && logLargeCopy(v, s) =>
    (DUFFCOPY [16 * (128 - s/8)] dst src mem)
    (LoweredMove [makeValAndOff(int32(s),int32(t.Alignment()))] dst src mem)

// Generic move uses a loop
(Move [s] {t} dst src mem) && (s <= 16 || logLargeCopy(v, s)) =>
    (LoweredMove [t.Alignment()]
        dst
        src
        (ADDI <src.Type> [s-moveSize(t.Alignment(), config)] src)
        mem)
(Move [s] {t} dst src mem) && s > 3*8*moveSize(t.Alignment(), config)
    && logLargeCopy(v, s) =>
    (LoweredMoveLoop [makeValAndOff(int32(s),int32(t.Alignment()))] dst src mem)
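A worked check of the unroll/loop threshold above, assuming an 8-aligned type so that moveSize is 8 bytes:

package main

import "fmt"

func main() {
    const moveSz = 8 // moveSize(t.Alignment(), config) for 8-aligned types
    fmt.Println(3 * 8 * moveSz) // 192: moves up to 192 bytes unroll inline; larger ones take LoweredMoveLoop
}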

// Boolean ops; 0=false, 1=true
(AndB ...) => (AND ...)
@@ -716,6 +673,8 @@
(MOVWUreg <t> x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWUload <t> [off] {sym} ptr mem)

// Replace load from same location as preceding store with copy.
(MOV(D|W|H|B)load [off] {sym} ptr1 (MOV(D|W|H|B)store [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOV(D|W|H|B)reg x)
(MOV(W|H|B)Uload [off] {sym} ptr1 (MOV(W|H|B)store [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOV(W|H|B)Ureg x)
(MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVXD x)
(FMOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVDX x)
(MOVWload [off] {sym} ptr1 (FMOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVXS x)
@@ -117,6 +117,7 @@ func init() {
regCtxt := regNamed["X26"]
callerSave := gpMask | fpMask | regNamed["g"]
r5toR6 := regNamed["X5"] | regNamed["X6"]

var (
    gpstore = regInfo{inputs: []regMask{gpspsbMask, gpspMask, 0}} // SB in first input so we can load from a global, but not in second to avoid using SB as a temporary register
@@ -277,89 +278,90 @@ func init() {
{name: "CALLclosure", argLength: -1, reg: callClosure, aux: "CallOff", call: true}, // call function via closure. arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: -1, reg: callInter, aux: "CallOff", call: true},     // call fn by pointer. arg0=codeptr, last arg=mem, auxint=argsize, returns mem

// duffzero
// arg0 = address of memory to zero (in X25, changed as side effect)
// Generic moves and zeros

// general unrolled zeroing
// arg0 = address of memory to zero
// arg1 = mem
// auxint = offset into duffzero code to start executing
// X1 (link register) changed because of function call
// auxint = element size and type alignment
// returns mem
//	mov	ZERO, (OFFSET)(Rarg0)
{
    name:           "LoweredZero",
    aux:            "SymValAndOff",
    typ:            "Mem",
    argLength:      2,
    symEffect:      "Write",
    faultOnNilArg0: true,
    reg: regInfo{
        inputs: []regMask{gpMask},
    },
},
// general unaligned zeroing
// arg0 = address of memory to zero (clobber)
// arg1 = mem
// auxint = element size and type alignment
// returns mem
{
    name: "DUFFZERO",
    aux:  "Int64",
    argLength: 2,
    reg: regInfo{
        inputs:   []regMask{regNamed["X25"]},
        clobbers: regNamed["X1"] | regNamed["X25"],
    },
    name:           "LoweredZeroLoop",
    aux:            "SymValAndOff",
    typ:            "Mem",
    argLength:      2,
    symEffect:      "Write",
    needIntTemp:    true,
    faultOnNilArg0: true,
    reg: regInfo{
        inputs:       []regMask{gpMask},
        clobbersArg0: true,
    },
},

// duffcopy
// arg0 = address of dst memory (in X25, changed as side effect)
// arg1 = address of src memory (in X24, changed as side effect)
// general unaligned move
// arg0 = address of dst memory (clobber)
// arg1 = address of src memory (clobber)
// arg2 = mem
// auxint = offset into duffcopy code to start executing
// X1 (link register) changed because of function call
// auxint = size and type alignment
// returns mem
//	mov	(offset)(Rarg1), TMP
//	mov	TMP, (offset)(Rarg0)
{
    name: "DUFFCOPY",
    aux:  "Int64",
    name:      "LoweredMove",
    aux:       "SymValAndOff",
    symEffect: "Write",
    argLength: 3,
    reg: regInfo{
        inputs:   []regMask{regNamed["X25"], regNamed["X24"]},
        clobbers: regNamed["X1"] | regNamed["X24"] | regNamed["X25"],
        inputs:   []regMask{gpMask &^ regNamed["X5"], gpMask &^ regNamed["X5"]},
        clobbers: regNamed["X5"],
    },
    typ:            "Mem",
    faultOnNilArg0: true,
    faultOnNilArg1: true,
},

// Generic moves and zeros

// general unaligned zeroing
// arg0 = address of memory to zero (in X5, changed as side effect)
// arg1 = address of the last element to zero (inclusive)
// arg2 = mem
// auxint = element size
// returns mem
//	mov	ZERO, (X5)
//	ADD	$sz, X5
//	BGEU	Rarg1, X5, -2(PC)
{
    name:      "LoweredZero",
    aux:       "Int64",
    argLength: 3,
    reg: regInfo{
        inputs:   []regMask{regNamed["X5"], gpMask},
        clobbers: regNamed["X5"],
    },
    typ:            "Mem",
    faultOnNilArg0: true,
},

// general unaligned move
// arg0 = address of dst memory (in X5, changed as side effect)
// arg1 = address of src memory (in X6, changed as side effect)
// arg2 = address of the last element of src (can't be X7 as we clobber it before using arg2)
// arg0 = address of dst memory (clobber)
// arg1 = address of src memory (clobber)
// arg3 = mem
// auxint = alignment
// clobbers X7 as a tmp register.
// returns mem
//	mov	(X6), X7
//	mov	X7, (X5)
//	ADD	$sz, X5
//	ADD	$sz, X6
//	BGEU	Rarg2, X5, -4(PC)
//loop:
//	mov	(Rarg1), X5
//	mov	X5, (Rarg0)
//	...rest 7 mov...
//	ADD	$sz, Rarg0
//	ADD	$sz, Rarg1
//	BNE	X6, Rarg1, loop
{
    name:      "LoweredMove",
    aux:       "Int64",
    argLength: 4,
    name:      "LoweredMoveLoop",
    aux:       "SymValAndOff",
    argLength: 3,
    symEffect: "Write",
    reg: regInfo{
        inputs:   []regMask{regNamed["X5"], regNamed["X6"], gpMask &^ regNamed["X7"]},
        clobbers: regNamed["X5"] | regNamed["X6"] | regNamed["X7"],
        inputs:       []regMask{gpMask &^ r5toR6, gpMask &^ r5toR6},
        clobbers:     r5toR6,
        clobbersArg0: true,
        clobbersArg1: true,
    },
    typ:            "Mem",
    faultOnNilArg0: true,
    faultOnNilArg1: true,
},
@@ -2088,7 +2088,7 @@
(NilCheck ptr:(NilCheck _ _) _ ) => ptr

// for late-expanded calls, recognize memequal applied to a single constant byte
// Support is limited to 1, 2, 4, 8 byte sizes
// Support is limited to [1-8] byte sizes
(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [1]) mem)
    && isSameCall(callAux, "runtime.memequal")
    && symIsRO(scon)
@@ -2135,6 +2135,118 @@
    && canLoadUnaligned(config) && config.PtrSize == 8
    => (MakeResult (Eq64 (Load <typ.Int64> sptr mem) (Const64 <typ.Int64> [int64(read64(scon,0,config.ctxt.Arch.ByteOrder))])) mem)

(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [3]) mem)
    && isSameCall(callAux, "runtime.memequal")
    && symIsRO(scon)
    && canLoadUnaligned(config) =>
    (MakeResult
        (Eq32
            (Or32 <typ.Int32>
                (ZeroExt16to32 <typ.Int32> (Load <typ.Int16> sptr mem))
                (Lsh32x32 <typ.Int32>
                    (ZeroExt8to32 <typ.Int32> (Load <typ.Int8> (OffPtr <typ.BytePtr> [2] sptr) mem))
                    (Const32 <typ.Int32> [16])))
            (Const32 <typ.Int32> [int32(uint32(read16(scon,0,config.ctxt.Arch.ByteOrder))|(uint32(read8(scon,2))<<16))]))
        mem)

(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [3]) mem)
    && isSameCall(callAux, "runtime.memequal")
    && symIsRO(scon)
    && canLoadUnaligned(config) =>
    (MakeResult
        (Eq32
            (Or32 <typ.Int32>
                (ZeroExt16to32 <typ.Int32> (Load <typ.Int16> sptr mem))
                (Lsh32x32 <typ.Int32>
                    (ZeroExt8to32 <typ.Int32> (Load <typ.Int8> (OffPtr <typ.BytePtr> [2] sptr) mem))
                    (Const32 <typ.Int32> [16])))
            (Const32 <typ.Int32> [int32(uint32(read16(scon,0,config.ctxt.Arch.ByteOrder))|(uint32(read8(scon,2))<<16))]))
        mem)

(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [5]) mem)
    && isSameCall(callAux, "runtime.memequal")
    && symIsRO(scon)
    && canLoadUnaligned(config) && config.PtrSize == 8 =>
    (MakeResult
        (Eq64
            (Or64 <typ.Int64>
                (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem))
                (Lsh64x64 <typ.Int64>
                    (ZeroExt8to64 <typ.Int64> (Load <typ.Int8> (OffPtr <typ.BytePtr> [4] sptr) mem))
                    (Const64 <typ.Int64> [32])))
            (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read8(scon,4))<<32))]))
        mem)

(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [5]) mem)
    && isSameCall(callAux, "runtime.memequal")
    && symIsRO(scon)
    && canLoadUnaligned(config) && config.PtrSize == 8 =>
    (MakeResult
        (Eq64
            (Or64 <typ.Int64>
                (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem))
                (Lsh64x64 <typ.Int64>
                    (ZeroExt8to64 <typ.Int64> (Load <typ.Int8> (OffPtr <typ.BytePtr> [4] sptr) mem))
                    (Const64 <typ.Int64> [32])))
            (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read8(scon,4))<<32))]))
        mem)

(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [6]) mem)
    && isSameCall(callAux, "runtime.memequal")
    && symIsRO(scon)
    && canLoadUnaligned(config) && config.PtrSize == 8 =>
    (MakeResult
        (Eq64
            (Or64 <typ.Int64>
                (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem))
                (Lsh64x64 <typ.Int64>
                    (ZeroExt16to64 <typ.Int64> (Load <typ.Int16> (OffPtr <typ.BytePtr> [4] sptr) mem))
                    (Const64 <typ.Int64> [32])))
            (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read16(scon,4,config.ctxt.Arch.ByteOrder))<<32))]))
        mem)

(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [6]) mem)
    && isSameCall(callAux, "runtime.memequal")
    && symIsRO(scon)
    && canLoadUnaligned(config) && config.PtrSize == 8 =>
    (MakeResult
        (Eq64
            (Or64 <typ.Int64>
                (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem))
                (Lsh64x64 <typ.Int64>
                    (ZeroExt16to64 <typ.Int64> (Load <typ.Int16> (OffPtr <typ.BytePtr> [4] sptr) mem))
                    (Const64 <typ.Int64> [32])))
            (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read16(scon,4,config.ctxt.Arch.ByteOrder))<<32))]))
        mem)

(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [7]) mem)
    && isSameCall(callAux, "runtime.memequal")
    && symIsRO(scon)
    && canLoadUnaligned(config) && config.PtrSize == 8 =>
    (MakeResult
        (Eq64
            (Or64 <typ.Int64>
                (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem))
                (Lsh64x64 <typ.Int64>
                    (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> (OffPtr <typ.BytePtr> [3] sptr) mem))
                    (Const64 <typ.Int64> [32])))
            (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read32(scon,3,config.ctxt.Arch.ByteOrder))<<32))]))
        mem)

(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [7]) mem)
    && isSameCall(callAux, "runtime.memequal")
    && symIsRO(scon)
    && canLoadUnaligned(config) && config.PtrSize == 8 =>
    (MakeResult
        (Eq64
            (Or64 <typ.Int64>
                (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem))
                (Lsh64x64 <typ.Int64>
                    (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> (OffPtr <typ.BytePtr> [3] sptr) mem))
                    (Const64 <typ.Int64> [32])))
            (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read32(scon,3,config.ctxt.Arch.ByteOrder))<<32))]))
        mem)

(StaticLECall {callAux} _ _ (Const64 [0]) mem)
    && isSameCall(callAux, "runtime.memequal")
    => (MakeResult (ConstBool <typ.Bool> [true]) mem)
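Roughly what the length-3 rule does, as illustrative Go (assuming a little-endian target; the compiler works on SSA values, not slices):

package main

import "fmt"

// eq3 sketches the expansion: a 16-bit load plus an 8-bit load, OR'd
// together and compared against one constant folded from the read-only data.
func eq3(b []byte) bool {
    got := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16
    const want = uint32('a') | uint32('b')<<8 | uint32('c')<<16
    return got == want
}

func main() { fmt.Println(eq3([]byte("abc"))) } // true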
@@ -2761,21 +2873,15 @@
(RotateLeft(64|32|16|8) (RotateLeft(64|32|16|8) x c) d) && c.Type.Size() == 2 && d.Type.Size() == 2 => (RotateLeft(64|32|16|8) x (Add16 <c.Type> c d))
(RotateLeft(64|32|16|8) (RotateLeft(64|32|16|8) x c) d) && c.Type.Size() == 1 && d.Type.Size() == 1 => (RotateLeft(64|32|16|8) x (Add8 <c.Type> c d))

// Loading constant values from dictionaries and itabs.
(Load <typ.BytePtr> (OffPtr [off] (Addr {s} sb) ) _) && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb)
(Load <typ.BytePtr> (OffPtr [off] (Convert (Addr {s} sb) _) ) _) && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb)
(Load <typ.BytePtr> (OffPtr [off] (ITab (IMake (Addr {s} sb) _))) _) && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb)
(Load <typ.BytePtr> (OffPtr [off] (ITab (IMake (Convert (Addr {s} sb) _) _))) _) && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb)
(Load <typ.Uintptr> (OffPtr [off] (Addr {s} sb) ) _) && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb)
(Load <typ.Uintptr> (OffPtr [off] (Convert (Addr {s} sb) _) ) _) && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb)
(Load <typ.Uintptr> (OffPtr [off] (ITab (IMake (Addr {s} sb) _))) _) && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb)
(Load <typ.Uintptr> (OffPtr [off] (ITab (IMake (Convert (Addr {s} sb) _) _))) _) && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb)

// Loading constant values from runtime._type.hash.
(Load <t> (OffPtr [off] (Addr {sym} _) ) _) && t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off) => (Const32 [fixed32(config, sym, off)])
(Load <t> (OffPtr [off] (Convert (Addr {sym} _) _) ) _) && t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off) => (Const32 [fixed32(config, sym, off)])
(Load <t> (OffPtr [off] (ITab (IMake (Addr {sym} _) _))) _) && t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off) => (Const32 [fixed32(config, sym, off)])
(Load <t> (OffPtr [off] (ITab (IMake (Convert (Addr {sym} _) _) _))) _) && t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off) => (Const32 [fixed32(config, sym, off)])
// Loading fixed addresses and constants.
(Load (Addr {s} sb) _) && isFixedLoad(v, s, 0) => rewriteFixedLoad(v, s, sb, 0)
(Load (Convert (Addr {s} sb) _) _) && isFixedLoad(v, s, 0) => rewriteFixedLoad(v, s, sb, 0)
(Load (ITab (IMake (Addr {s} sb) _)) _) && isFixedLoad(v, s, 0) => rewriteFixedLoad(v, s, sb, 0)
(Load (ITab (IMake (Convert (Addr {s} sb) _) _)) _) && isFixedLoad(v, s, 0) => rewriteFixedLoad(v, s, sb, 0)
(Load (OffPtr [off] (Addr {s} sb) ) _) && isFixedLoad(v, s, off) => rewriteFixedLoad(v, s, sb, off)
(Load (OffPtr [off] (Convert (Addr {s} sb) _) ) _) && isFixedLoad(v, s, off) => rewriteFixedLoad(v, s, sb, off)
(Load (OffPtr [off] (ITab (IMake (Addr {s} sb) _))) _) && isFixedLoad(v, s, off) => rewriteFixedLoad(v, s, sb, off)
(Load (OffPtr [off] (ITab (IMake (Convert (Addr {s} sb) _) _))) _) && isFixedLoad(v, s, off) => rewriteFixedLoad(v, s, sb, off)

// Calling cmpstring a second time with the same arguments in the
// same memory state can reuse the results of the first call.
@@ -1452,7 +1452,7 @@ func opHasAuxInt(op opData) bool {
switch op.aux {
case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "UInt8", "Float32", "Float64",
    "SymOff", "CallOff", "SymValAndOff", "TypSize", "ARM64BitField", "FlagConstant", "CCop",
    "PanicBoundsC", "PanicBoundsCC":
    "PanicBoundsC", "PanicBoundsCC", "ARM64ConditionalParams":
    return true
}
return false
@@ -1860,6 +1860,8 @@ func (op opData) auxIntType() string {
    return "flagConstant"
case "ARM64BitField":
    return "arm64BitField"
case "ARM64ConditionalParams":
    return "arm64ConditionalParams"
case "PanicBoundsC", "PanicBoundsCC":
    return "int64"
default:
@@ -145,7 +145,7 @@ func checkFunc(f *Func) {
        f.Fatalf("bad int32 AuxInt value for %v", v)
    }
    canHaveAuxInt = true
case auxInt64, auxARM64BitField:
case auxInt64, auxARM64BitField, auxARM64ConditionalParams:
    canHaveAuxInt = true
case auxInt128:
    // AuxInt must be zero, so leave canHaveAuxInt set to false.
@@ -279,6 +279,8 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize, softfloat boo
c.RegSize = 8
c.lowerBlock = rewriteBlockMIPS64
c.lowerValue = rewriteValueMIPS64
c.lateLowerBlock = rewriteBlockMIPS64latelower
c.lateLowerValue = rewriteValueMIPS64latelower
c.registers = registersMIPS64[:]
c.gpRegMask = gpRegMaskMIPS64
c.fpRegMask = fpRegMaskMIPS64
@@ -181,34 +181,45 @@ func cse(f *Func) {
for _, e := range partition {
    slices.SortFunc(e, func(v, w *Value) int {
        c := cmp.Compare(sdom.domorder(v.Block), sdom.domorder(w.Block))
        if v.Op != OpLocalAddr || c != 0 {
        if c != 0 {
            return c
        }
        // compare the memory args for OpLocalAddrs in the same block
        vm := v.Args[1]
        wm := w.Args[1]
        if vm == wm {
            return 0
        if v.Op == OpLocalAddr {
            // compare the memory args for OpLocalAddrs in the same block
            vm := v.Args[1]
            wm := w.Args[1]
            if vm == wm {
                return 0
            }
            // if the two OpLocalAddrs are in the same block, and one's memory
            // arg also in the same block, but the other one's memory arg not,
            // the latter must be in an ancestor block
            if vm.Block != v.Block {
                return -1
            }
            if wm.Block != w.Block {
                return +1
            }
            // use store order if the memory args are in the same block
            vs := storeOrdering(vm, o)
            ws := storeOrdering(wm, o)
            if vs <= 0 {
                f.Fatalf("unable to determine the order of %s", vm.LongString())
            }
            if ws <= 0 {
                f.Fatalf("unable to determine the order of %s", wm.LongString())
            }
            return cmp.Compare(vs, ws)
        }
        // if the two OpLocalAddrs are in the same block, and one's memory
        // arg also in the same block, but the other one's memory arg not,
        // the latter must be in an ancestor block
        if vm.Block != v.Block {
            return -1
        }
        if wm.Block != w.Block {
        vStmt := v.Pos.IsStmt() == src.PosIsStmt
        wStmt := w.Pos.IsStmt() == src.PosIsStmt
        if vStmt != wStmt {
            if vStmt {
                return -1
            }
            return +1
        }
        // use store order if the memory args are in the same block
        vs := storeOrdering(vm, o)
        ws := storeOrdering(wm, o)
        if vs <= 0 {
            f.Fatalf("unable to determine the order of %s", vm.LongString())
        }
        if ws <= 0 {
            f.Fatalf("unable to determine the order of %s", wm.LongString())
        }
        return cmp.Compare(vs, ws)
        return 0
    })

    for i := 0; i < len(e)-1; i++ {
@@ -375,11 +375,12 @@ const (
auxPanicBoundsCC // two constants for a bounds failure

// architecture specific aux types
auxARM64BitField     // aux is an arm64 bitfield lsb and width packed into auxInt
auxS390XRotateParams // aux is a s390x rotate parameters object encoding start bit, end bit and rotate amount
auxS390XCCMask       // aux is a s390x 4-bit condition code mask
auxS390XCCMaskInt8   // aux is a s390x 4-bit condition code mask, auxInt is an int8 immediate
auxS390XCCMaskUint8  // aux is a s390x 4-bit condition code mask, auxInt is a uint8 immediate
auxARM64BitField          // aux is an arm64 bitfield lsb and width packed into auxInt
auxARM64ConditionalParams // aux is a structure holding the condition, the fallback NZCV flags, and an optional constant with an in-use indicator
auxS390XRotateParams      // aux is a s390x rotate parameters object encoding start bit, end bit and rotate amount
auxS390XCCMask            // aux is a s390x 4-bit condition code mask
auxS390XCCMaskInt8        // aux is a s390x 4-bit condition code mask, auxInt is an int8 immediate
auxS390XCCMaskUint8       // aux is a s390x 4-bit condition code mask, auxInt is a uint8 immediate
)

// A SymEffect describes the effect that an SSA Value has on the variable
@@ -534,3 +535,11 @@ func (b BoundsKind) Code() (rtabi.BoundsErrorCode, bool) {
// width+lsb<64 for 64-bit variant, width+lsb<32 for 32-bit variant.
// The meaning of width and lsb is instruction-dependent.
type arm64BitField int16

// arm64ConditionalParams is the Go type of the ARM64ConditionalParams auxInt.
type arm64ConditionalParams struct {
    cond       Op    // Condition code to evaluate
    nzcv       uint8 // Fallback NZCV flags value when condition is false
    constValue uint8 // Immediate value for constant comparisons
    ind        bool  // Constant comparison indicator
}
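A hedged sketch of how these params might be populated when lowering a chained compare; makeCCMPParams is a hypothetical helper, not part of the CL, and it presumes the surrounding ssa package declarations:

// makeCCMPParams packs what the CCMP ops need: the condition to evaluate,
// the fallback NZCV bits used when it is false, and an optional immediate
// with its in-use indicator.
func makeCCMPParams(cond Op, nzcv, c uint8, useConst bool) arm64ConditionalParams {
    return arm64ConditionalParams{cond: cond, nzcv: nzcv, constValue: c, ind: useConst}
}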
@@ -3563,6 +3563,14 @@ const (
OpARM64CSINV
OpARM64CSNEG
OpARM64CSETM
OpARM64CCMP
OpARM64CCMN
OpARM64CCMPconst
OpARM64CCMNconst
OpARM64CCMPW
OpARM64CCMNW
OpARM64CCMPWconst
OpARM64CCMNWconst
OpARM64CALLstatic
OpARM64CALLtail
OpARM64CALLclosure
@@ -3660,11 +3668,14 @@ const (
OpLOONG64VPCNT16
OpLOONG64ADDV
OpLOONG64ADDVconst
OpLOONG64ADDV16const
OpLOONG64SUBV
OpLOONG64SUBVconst
OpLOONG64MULV
OpLOONG64MULHV
OpLOONG64MULHVU
OpLOONG64MULH
OpLOONG64MULHU
OpLOONG64DIVV
OpLOONG64DIVVU
OpLOONG64REMV
@@ -3792,11 +3803,10 @@ const (
OpLOONG64CALLtail
OpLOONG64CALLclosure
OpLOONG64CALLinter
OpLOONG64DUFFZERO
OpLOONG64LoweredZero
OpLOONG64DUFFCOPY
OpLOONG64LoweredZeroLoop
OpLOONG64LoweredMove
OpLOONG64LoweredMoveLoop
OpLOONG64LoweredAtomicLoad8
OpLOONG64LoweredAtomicLoad32
OpLOONG64LoweredAtomicLoad64
@@ -4020,10 +4030,7 @@ const (
OpMIPS64MOVVstore
OpMIPS64MOVFstore
OpMIPS64MOVDstore
OpMIPS64MOVBstorezero
OpMIPS64MOVHstorezero
OpMIPS64MOVWstorezero
OpMIPS64MOVVstorezero
OpMIPS64ZERO
OpMIPS64MOVWfpgp
OpMIPS64MOVWgpfp
OpMIPS64MOVVfpgp
@@ -4429,10 +4436,10 @@ const (
OpRISCV64CALLtail
OpRISCV64CALLclosure
OpRISCV64CALLinter
OpRISCV64DUFFZERO
OpRISCV64DUFFCOPY
OpRISCV64LoweredZero
OpRISCV64LoweredZeroLoop
OpRISCV64LoweredMove
OpRISCV64LoweredMoveLoop
OpRISCV64LoweredAtomicLoad8
OpRISCV64LoweredAtomicLoad32
OpRISCV64LoweredAtomicLoad64
@@ -54564,6 +54571,98 @@ var opcodeTable = [...]opInfo{
        },
    },
},
{
    name:    "CCMP",
    auxType: auxARM64ConditionalParams,
    argLen:  3,
    asm:     arm64.ACCMP,
    reg: regInfo{
        inputs: []inputInfo{
            {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
            {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
        },
    },
},
{
    name:    "CCMN",
    auxType: auxARM64ConditionalParams,
    argLen:  3,
    asm:     arm64.ACCMN,
    reg: regInfo{
        inputs: []inputInfo{
            {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
            {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
        },
    },
},
{
    name:    "CCMPconst",
    auxType: auxARM64ConditionalParams,
    argLen:  2,
    asm:     arm64.ACCMP,
    reg: regInfo{
        inputs: []inputInfo{
            {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
        },
    },
},
{
    name:    "CCMNconst",
    auxType: auxARM64ConditionalParams,
    argLen:  2,
    asm:     arm64.ACCMN,
    reg: regInfo{
        inputs: []inputInfo{
            {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
        },
    },
},
{
    name:    "CCMPW",
    auxType: auxARM64ConditionalParams,
    argLen:  3,
    asm:     arm64.ACCMPW,
    reg: regInfo{
        inputs: []inputInfo{
            {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
            {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
        },
    },
},
{
    name:    "CCMNW",
    auxType: auxARM64ConditionalParams,
    argLen:  3,
    asm:     arm64.ACCMNW,
    reg: regInfo{
        inputs: []inputInfo{
            {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
            {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
        },
    },
},
{
    name:    "CCMPWconst",
    auxType: auxARM64ConditionalParams,
    argLen:  2,
    asm:     arm64.ACCMPW,
    reg: regInfo{
        inputs: []inputInfo{
            {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
        },
    },
},
{
    name:    "CCMNWconst",
    auxType: auxARM64ConditionalParams,
    argLen:  2,
    asm:     arm64.ACCMNW,
    reg: regInfo{
        inputs: []inputInfo{
            {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
        },
    },
},
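// For orientation: the input mask 402653183 above is 0x17ffffff; its 28
// set bits select exactly the registers listed in the comments — R0-R17,
// R19-R26, g and R30 — i.e. the allocatable general-purpose registers,
// with R18 left out.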
{
    name:    "CALLstatic",
    auxType: auxCallOff,
@@ -55803,6 +55902,20 @@ var opcodeTable = [...]opInfo{
        },
    },
},
{
    name:    "ADDV16const",
    auxType: auxInt64,
    argLen:  1,
    asm:     loong64.AADDV16,
    reg: regInfo{
        inputs: []inputInfo{
            {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
        },
        outputs: []outputInfo{
            {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
        },
    },
},
{
    name:   "SUBV",
    argLen: 2,
@@ -55876,6 +55989,36 @@ var opcodeTable = [...]opInfo{
        },
    },
},
{
    name:        "MULH",
    argLen:      2,
    commutative: true,
    asm:         loong64.AMULH,
    reg: regInfo{
        inputs: []inputInfo{
            {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
            {1, 1073741817}, // ZERO R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
        },
        outputs: []outputInfo{
            {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
        },
    },
},
{
    name:        "MULHU",
    argLen:      2,
    commutative: true,
    asm:         loong64.AMULHU,
    reg: regInfo{
        inputs: []inputInfo{
            {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
            {1, 1073741817}, // ZERO R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
        },
        outputs: []outputInfo{
            {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
        },
    },
},
{
    name:   "DIVV",
    argLen: 2,
@@ -57638,18 +57781,6 @@ var opcodeTable = [...]opInfo{
        clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
    },
},
{
    name:           "DUFFZERO",
    auxType:        auxInt64,
    argLen:         2,
    faultOnNilArg0: true,
    reg: regInfo{
        inputs: []inputInfo{
            {0, 524288}, // R20
        },
        clobbers: 524290, // R1 R20
    },
},
{
    name:    "LoweredZero",
    auxType: auxInt64,
@@ -57661,20 +57792,6 @@ var opcodeTable = [...]opInfo{
        },
    },
},
{
    name:           "DUFFCOPY",
    auxType:        auxInt64,
    argLen:         3,
    faultOnNilArg0: true,
    faultOnNilArg1: true,
    reg: regInfo{
        inputs: []inputInfo{
            {0, 1048576}, // R21
            {1, 524288}, // R20
        },
        clobbers: 1572866, // R1 R20 R21
    },
},
{
    name:    "LoweredZeroLoop",
    auxType: auxInt64,
@@ -57691,16 +57808,31 @@ var opcodeTable = [...]opInfo{
{
    name:           "LoweredMove",
    auxType:        auxInt64,
    argLen:         4,
    argLen:         3,
    faultOnNilArg0: true,
    faultOnNilArg1: true,
    reg: regInfo{
        inputs: []inputInfo{
            {0, 1048576}, // R21
            {1, 524288}, // R20
            {2, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
            {0, 1071120376}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R21 R23 R24 R25 R26 R27 R28 R29 R31
            {1, 1071120376}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R21 R23 R24 R25 R26 R27 R28 R29 R31
        },
        clobbers: 1572864, // R20 R21
        clobbers: 524288, // R20
    },
},
{
    name:           "LoweredMoveLoop",
    auxType:        auxInt64,
    argLen:         3,
    faultOnNilArg0: true,
    faultOnNilArg1: true,
    reg: regInfo{
        inputs: []inputInfo{
            {0, 1070071800}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R23 R24 R25 R26 R27 R28 R29 R31
            {1, 1070071800}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R23 R24 R25 R26 R27 R28 R29 R31
        },
        clobbers:     1572864, // R20 R21
        clobbersArg0: true,
        clobbersArg1: true,
    },
},
{
@@ -59796,7 +59928,7 @@ var opcodeTable = [...]opInfo{
    reg: regInfo{
        inputs: []inputInfo{
            {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
            {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
            {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
        },
        outputs: []outputInfo{
            {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
@@ -59824,7 +59956,7 @@ var opcodeTable = [...]opInfo{
    reg: regInfo{
        inputs: []inputInfo{
            {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
            {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
            {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
        },
        outputs: []outputInfo{
            {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
@@ -60031,7 +60163,7 @@ var opcodeTable = [...]opInfo{
    reg: regInfo{
        inputs: []inputInfo{
            {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
            {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
            {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
        },
        outputs: []outputInfo{
            {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
@@ -60060,7 +60192,7 @@ var opcodeTable = [...]opInfo{
    reg: regInfo{
        inputs: []inputInfo{
            {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
            {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
            {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
        },
        outputs: []outputInfo{
            {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
@@ -60089,7 +60221,7 @@ var opcodeTable = [...]opInfo{
    reg: regInfo{
        inputs: []inputInfo{
            {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
            {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
            {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
        },
        outputs: []outputInfo{
            {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
@@ -60118,7 +60250,7 @@ var opcodeTable = [...]opInfo{
    reg: regInfo{
        inputs: []inputInfo{
            {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
            {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
            {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
        },
        outputs: []outputInfo{
            {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
@@ -60223,7 +60355,7 @@ var opcodeTable = [...]opInfo{
    reg: regInfo{
        inputs: []inputInfo{
            {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
            {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
            {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
        },
        outputs: []outputInfo{
            {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
@@ -60251,7 +60383,7 @@ var opcodeTable = [...]opInfo{
    reg: regInfo{
        inputs: []inputInfo{
            {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
            {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
            {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
        },
        outputs: []outputInfo{
            {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
@@ -60279,7 +60411,7 @@ var opcodeTable = [...]opInfo{
    reg: regInfo{
        inputs: []inputInfo{
            {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
            {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
            {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
        },
        outputs: []outputInfo{
            {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
@@ -60307,7 +60439,7 @@ var opcodeTable = [...]opInfo{
    reg: regInfo{
        inputs: []inputInfo{
            {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
            {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
            {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
        },
        outputs: []outputInfo{
            {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
@@ -60335,7 +60467,7 @@ var opcodeTable = [...]opInfo{
    reg: regInfo{
        inputs: []inputInfo{
            {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
            {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
            {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
        },
        outputs: []outputInfo{
            {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
@@ -60627,7 +60759,7 @@ var opcodeTable = [...]opInfo{
    asm: mips.AMOVB,
    reg: regInfo{
        inputs: []inputInfo{
            {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
            {1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
            {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
        },
    },
@ -60641,7 +60773,7 @@ var opcodeTable = [...]opInfo{
|
|||
asm: mips.AMOVH,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
|
||||
{1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
|
||||
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
|
||||
},
|
||||
},
|
||||
|
|
@ -60655,7 +60787,7 @@ var opcodeTable = [...]opInfo{
|
|||
asm: mips.AMOVW,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
|
||||
{1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
|
||||
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
|
||||
},
|
||||
},
|
||||
|
|
@ -60669,7 +60801,7 @@ var opcodeTable = [...]opInfo{
|
|||
asm: mips.AMOVV,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
|
||||
{1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
|
||||
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
|
||||
},
|
||||
},
|
||||
|
|
@ -60703,56 +60835,11 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "MOVBstorezero",
|
||||
auxType: auxSymOff,
|
||||
argLen: 2,
|
||||
faultOnNilArg0: true,
|
||||
symEffect: SymWrite,
|
||||
asm: mips.AMOVB,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "MOVHstorezero",
|
||||
auxType: auxSymOff,
|
||||
argLen: 2,
|
||||
faultOnNilArg0: true,
|
||||
symEffect: SymWrite,
|
||||
asm: mips.AMOVH,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "MOVWstorezero",
|
||||
auxType: auxSymOff,
|
||||
argLen: 2,
|
||||
faultOnNilArg0: true,
|
||||
symEffect: SymWrite,
|
||||
asm: mips.AMOVW,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "MOVVstorezero",
|
||||
auxType: auxSymOff,
|
||||
argLen: 2,
|
||||
faultOnNilArg0: true,
|
||||
symEffect: SymWrite,
|
||||
asm: mips.AMOVV,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
|
||||
},
|
||||
},
|
||||
name: "ZERO",
|
||||
argLen: 0,
|
||||
zeroWidth: true,
|
||||
fixedReg: true,
|
||||
reg: regInfo{},
|
||||
},
|
||||
{
|
||||
name: "MOVWfpgp",
|
||||
|
|
@ -61153,7 +61240,7 @@ var opcodeTable = [...]opInfo{
|
|||
asm: mips.AAND,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
|
||||
{1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
|
||||
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
|
||||
},
|
||||
},
|
||||
|
|
@ -61167,7 +61254,7 @@ var opcodeTable = [...]opInfo{
|
|||
asm: mips.AOR,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
|
||||
{1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
|
||||
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
|
||||
},
|
||||
},
|
||||
|
|
@ -61218,7 +61305,7 @@ var opcodeTable = [...]opInfo{
|
|||
hasSideEffects: true,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
|
||||
{1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
|
||||
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
|
||||
},
|
||||
},
|
||||
|
|
@ -61230,7 +61317,7 @@ var opcodeTable = [...]opInfo{
|
|||
hasSideEffects: true,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
|
||||
{1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
|
||||
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
|
||||
},
|
||||
},
|
||||
|
|
@ -61242,7 +61329,7 @@ var opcodeTable = [...]opInfo{
|
|||
hasSideEffects: true,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
|
||||
{1, 234881023}, // ZERO R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
|
||||
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
|
||||
},
|
||||
},
|
||||
|
|
@ -66181,57 +66268,61 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "DUFFZERO",
|
||||
auxType: auxInt64,
|
||||
name: "LoweredZero",
|
||||
auxType: auxSymValAndOff,
|
||||
argLen: 2,
|
||||
faultOnNilArg0: true,
|
||||
symEffect: SymWrite,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{0, 16777216}, // X25
|
||||
{0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
|
||||
},
|
||||
clobbers: 16777216, // X25
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "DUFFCOPY",
|
||||
auxType: auxInt64,
|
||||
name: "LoweredZeroLoop",
|
||||
auxType: auxSymValAndOff,
|
||||
argLen: 2,
|
||||
needIntTemp: true,
|
||||
faultOnNilArg0: true,
|
||||
symEffect: SymWrite,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
|
||||
},
|
||||
clobbersArg0: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "LoweredMove",
|
||||
auxType: auxSymValAndOff,
|
||||
argLen: 3,
|
||||
faultOnNilArg0: true,
|
||||
faultOnNilArg1: true,
|
||||
symEffect: SymWrite,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{0, 16777216}, // X25
|
||||
{1, 8388608}, // X24
|
||||
},
|
||||
clobbers: 25165824, // X24 X25
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "LoweredZero",
|
||||
auxType: auxInt64,
|
||||
argLen: 3,
|
||||
faultOnNilArg0: true,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{0, 16}, // X5
|
||||
{1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
|
||||
{0, 1006632928}, // X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
|
||||
{1, 1006632928}, // X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
|
||||
},
|
||||
clobbers: 16, // X5
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "LoweredMove",
|
||||
auxType: auxInt64,
|
||||
argLen: 4,
|
||||
name: "LoweredMoveLoop",
|
||||
auxType: auxSymValAndOff,
|
||||
argLen: 3,
|
||||
faultOnNilArg0: true,
|
||||
faultOnNilArg1: true,
|
||||
symEffect: SymWrite,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{0, 16}, // X5
|
||||
{1, 32}, // X6
|
||||
{2, 1006632880}, // X5 X6 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
|
||||
{0, 1006632896}, // X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
|
||||
{1, 1006632896}, // X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
|
||||
},
|
||||
clobbers: 112, // X5 X6 X7
|
||||
clobbers: 48, // X5 X6
|
||||
clobbersArg0: true,
|
||||
clobbersArg1: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
|
|
@ -81864,7 +81955,7 @@ var specialRegMaskMIPS = regMask(105553116266496)
|
|||
var framepointerRegMIPS = int8(-1)
|
||||
var linkRegMIPS = int8(28)
|
||||
var registersMIPS64 = [...]Register{
|
||||
{0, mips.REG_R0, "R0"},
|
||||
{0, mips.REGZERO, "ZERO"},
|
||||
{1, mips.REG_R1, "R1"},
|
||||
{2, mips.REG_R2, "R2"},
|
||||
{3, mips.REG_R3, "R3"},
|
||||
|
|
|
|||
|
|
@ -1438,7 +1438,7 @@ func (s *regAllocState) regalloc(f *Func) {
|
|||
case OpSB:
|
||||
s.assignReg(s.SBReg, v, v)
|
||||
s.sb = v.ID
|
||||
case OpARM64ZERO, OpLOONG64ZERO:
|
||||
case OpARM64ZERO, OpLOONG64ZERO, OpMIPS64ZERO:
|
||||
s.assignReg(s.ZeroIntReg, v, v)
|
||||
case OpAMD64Zero128, OpAMD64Zero256, OpAMD64Zero512:
|
||||
regspec := s.regspec(v)
|
||||
|
|
|
|||
|
|
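Note: the regalloc hunk above only adds OpMIPS64ZERO to the ops pinned to the dedicated zero register. A minimal sketch of that idea, with made-up types standing in for the compiler's regAllocState:

package main

import "fmt"

// Sketch: values produced by "fixed register" ops (like MIPS64's ZERO,
// which reads the hard-wired $zero register) skip normal allocation;
// the allocator just records the predetermined register.
type valueID int

type allocator struct {
	assigned map[valueID]string
}

func (a *allocator) assignFixed(v valueID, reg string) {
	a.assigned[v] = reg // no interference checks, no spilling
}

func main() {
	a := &allocator{assigned: map[valueID]string{}}
	a.assignFixed(1, "ZERO") // e.g. a value produced by an OpMIPS64ZERO
	fmt.Println(a.assigned[1])
}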
@ -8,6 +8,7 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/logopt"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/rttype"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/s390x"

@ -668,6 +669,17 @@ func auxIntToValAndOff(i int64) ValAndOff {
func auxIntToArm64BitField(i int64) arm64BitField {
return arm64BitField(i)
}
func auxIntToArm64ConditionalParams(i int64) arm64ConditionalParams {
var params arm64ConditionalParams
params.cond = Op(i & 0xffff)
i >>= 16
params.nzcv = uint8(i & 0x0f)
i >>= 4
params.constValue = uint8(i & 0x1f)
i >>= 5
params.ind = i == 1
return params
}
func auxIntToFlagConstant(x int64) flagConstant {
return flagConstant(x)
}

@ -709,6 +721,20 @@ func valAndOffToAuxInt(v ValAndOff) int64 {
func arm64BitFieldToAuxInt(v arm64BitField) int64 {
return int64(v)
}
func arm64ConditionalParamsToAuxInt(v arm64ConditionalParams) int64 {
if v.cond&^0xffff != 0 {
panic("condition value exceeds 16 bits")
}

var i int64
if v.ind {
i = 1 << 25
}
i |= int64(v.constValue) << 20
i |= int64(v.nzcv) << 16
i |= int64(v.cond)
return i
}
func flagConstantToAuxInt(x flagConstant) int64 {
return int64(x)
}

@ -1898,6 +1924,43 @@ func arm64BFWidth(mask, rshift int64) int64 {
return nto(shiftedMask)
}

// encodes condition code and NZCV flags into auxint.
func arm64ConditionalParamsAuxInt(cond Op, nzcv uint8) arm64ConditionalParams {
if cond < OpARM64Equal || cond > OpARM64GreaterEqualU {
panic("Wrong conditional operation")
}
if nzcv&0x0f != nzcv {
panic("Wrong value of NZCV flag")
}
return arm64ConditionalParams{cond, nzcv, 0, false}
}

// encodes condition code, NZCV flags and constant value into auxint.
func arm64ConditionalParamsAuxIntWithValue(cond Op, nzcv uint8, value uint8) arm64ConditionalParams {
if value&0x1f != value {
panic("Wrong value of constant")
}
params := arm64ConditionalParamsAuxInt(cond, nzcv)
params.constValue = value
params.ind = true
return params
}

// extracts condition code from auxint.
func (condParams arm64ConditionalParams) Cond() Op {
return condParams.cond
}

// extracts NZCV flags from auxint.
func (condParams arm64ConditionalParams) Nzcv() int64 {
return int64(condParams.nzcv)
}

// extracts constant value from auxint if present.
func (condParams arm64ConditionalParams) ConstValue() (int64, bool) {
return int64(condParams.constValue), condParams.ind
}

// registerizable reports whether t is a primitive type that fits in
// a register. It assumes float64 values will always fit into registers
// even if that isn't strictly true.
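Note: the hunks above pack the CCMP parameters into a single auxint. A standalone round-trip of the same bit layout (cond in bits 0-15, nzcv in bits 16-19, constValue in bits 20-24, ind in bit 25; the Op and struct types here are simplified stand-ins, not the compiler's):

package main

import "fmt"

// Simplified stand-ins for the compiler's types.
type Op int64

type condParams struct {
	cond       Op
	nzcv       uint8
	constValue uint8
	ind        bool
}

// pack mirrors arm64ConditionalParamsToAuxInt: ind in bit 25,
// constValue in bits 20-24, nzcv in bits 16-19, cond in bits 0-15.
func pack(p condParams) int64 {
	var i int64
	if p.ind {
		i = 1 << 25
	}
	i |= int64(p.constValue) << 20
	i |= int64(p.nzcv) << 16
	i |= int64(p.cond)
	return i
}

// unpack mirrors auxIntToArm64ConditionalParams.
func unpack(i int64) condParams {
	var p condParams
	p.cond = Op(i & 0xffff)
	i >>= 16
	p.nzcv = uint8(i & 0x0f)
	i >>= 4
	p.constValue = uint8(i & 0x1f)
	i >>= 5
	p.ind = i == 1
	return p
}

func main() {
	in := condParams{cond: 42, nzcv: 0b1010, constValue: 17, ind: true}
	out := unpack(pack(in))
	fmt.Println(in == out) // true: the encoding round-trips
}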
@ -1981,74 +2044,128 @@ func symIsROZero(sym Sym) bool {
return true
}

// isFixed32 returns true if the int32 at offset off in symbol sym
// is known and constant.
func isFixed32(c *Config, sym Sym, off int64) bool {
return isFixed(c, sym, off, 4)
}

// isFixed returns true if the range [off,off+size] of the symbol sym
// is known and constant.
func isFixed(c *Config, sym Sym, off, size int64) bool {
// isFixedLoad returns true if the load can be resolved to fixed address or constant,
// and can be rewritten by rewriteFixedLoad.
func isFixedLoad(v *Value, sym Sym, off int64) bool {
lsym := sym.(*obj.LSym)
if lsym.Extra == nil {
if (v.Type.IsPtrShaped() || v.Type.IsUintptr()) && lsym.Type == objabi.SRODATA {
for _, r := range lsym.R {
if (r.Type == objabi.R_ADDR || r.Type == objabi.R_WEAKADDR) && int64(r.Off) == off && r.Add == 0 {
return true
}
}
return false
}
if _, ok := (*lsym.Extra).(*obj.TypeInfo); ok {
if off == 2*c.PtrSize && size == 4 {
return true // type hash field
}
}
return false
}
func fixed32(c *Config, sym Sym, off int64) int32 {
lsym := sym.(*obj.LSym)
if ti, ok := (*lsym.Extra).(*obj.TypeInfo); ok {
if off == 2*c.PtrSize {
return int32(types.TypeHash(ti.Type.(*types.Type)))
}
}
base.Fatalf("fixed32 data not known for %s:%d", sym, off)
return 0
}

// isFixedSym returns true if the contents of sym at the given offset
// is known and is the constant address of another symbol.
func isFixedSym(sym Sym, off int64) bool {
lsym := sym.(*obj.LSym)
switch {
case lsym.Type == objabi.SRODATA:
// itabs, dictionaries
default:
return false
}
for _, r := range lsym.R {
if (r.Type == objabi.R_ADDR || r.Type == objabi.R_WEAKADDR) && int64(r.Off) == off && r.Add == 0 {
if strings.HasPrefix(lsym.Name, "type:") {
// Type symbols do not contain information about their fields, unlike the cases above.
// Hand-implement field accesses.
// TODO: can this be replaced with reflectdata.writeType and just use the code above?

t := (*lsym.Extra).(*obj.TypeInfo).Type.(*types.Type)

for _, f := range rttype.Type.Fields() {
if f.Offset == off && copyCompatibleType(v.Type, f.Type) {
switch f.Sym.Name {
case "Size_", "PtrBytes", "Hash", "Kind_":
return true
default:
// fmt.Println("unknown field", f.Sym.Name)
return false
}
}
}

if t.IsPtr() && off == rttype.PtrType.OffsetOf("Elem") {
return true
}

return false
}

return false
}
func fixedSym(f *Func, sym Sym, off int64) Sym {

// rewriteFixedLoad rewrites a load to a fixed address or constant, if isFixedLoad returns true.
func rewriteFixedLoad(v *Value, sym Sym, sb *Value, off int64) *Value {
b := v.Block
f := b.Func

lsym := sym.(*obj.LSym)
for _, r := range lsym.R {
if (r.Type == objabi.R_ADDR || r.Type == objabi.R_WEAKADDR) && int64(r.Off) == off {
if strings.HasPrefix(r.Sym.Name, "type:") {
// In case we're loading a type out of a dictionary, we need to record
// that the containing function might put that type in an interface.
// That information is currently recorded in relocations in the dictionary,
// but if we perform this load at compile time then the dictionary
// might be dead.
reflectdata.MarkTypeSymUsedInInterface(r.Sym, f.fe.Func().Linksym())
} else if strings.HasPrefix(r.Sym.Name, "go:itab") {
// Same, but if we're using an itab we need to record that the
// itab._type might be put in an interface.
reflectdata.MarkTypeSymUsedInInterface(r.Sym, f.fe.Func().Linksym())
if (v.Type.IsPtrShaped() || v.Type.IsUintptr()) && lsym.Type == objabi.SRODATA {
for _, r := range lsym.R {
if (r.Type == objabi.R_ADDR || r.Type == objabi.R_WEAKADDR) && int64(r.Off) == off && r.Add == 0 {
if strings.HasPrefix(r.Sym.Name, "type:") {
// In case we're loading a type out of a dictionary, we need to record
// that the containing function might put that type in an interface.
// That information is currently recorded in relocations in the dictionary,
// but if we perform this load at compile time then the dictionary
// might be dead.
reflectdata.MarkTypeSymUsedInInterface(r.Sym, f.fe.Func().Linksym())
} else if strings.HasPrefix(r.Sym.Name, "go:itab") {
// Same, but if we're using an itab we need to record that the
// itab._type might be put in an interface.
reflectdata.MarkTypeSymUsedInInterface(r.Sym, f.fe.Func().Linksym())
}
v.reset(OpAddr)
v.Aux = symToAux(r.Sym)
v.AddArg(sb)
return v
}
return r.Sym
}
base.Fatalf("fixedLoad data not known for %s:%d", sym, off)
}
base.Fatalf("fixedSym data not known for %s:%d", sym, off)

if strings.HasPrefix(lsym.Name, "type:") {
// Type symbols do not contain information about their fields, unlike the cases above.
// Hand-implement field accesses.
// TODO: can this be replaced with reflectdata.writeType and just use the code above?

t := (*lsym.Extra).(*obj.TypeInfo).Type.(*types.Type)

ptrSizedOpConst := OpConst64
if f.Config.PtrSize == 4 {
ptrSizedOpConst = OpConst32
}

for _, f := range rttype.Type.Fields() {
if f.Offset == off && copyCompatibleType(v.Type, f.Type) {
switch f.Sym.Name {
case "Size_":
v.reset(ptrSizedOpConst)
v.AuxInt = int64(t.Size())
return v
case "PtrBytes":
v.reset(ptrSizedOpConst)
v.AuxInt = int64(types.PtrDataSize(t))
return v
case "Hash":
v.reset(OpConst32)
v.AuxInt = int64(types.TypeHash(t))
return v
case "Kind_":
v.reset(OpConst8)
v.AuxInt = int64(reflectdata.ABIKindOfType(t))
return v
default:
base.Fatalf("unknown field %s for fixedLoad of %s at offset %d", f.Sym.Name, lsym.Name, off)
}
}
}

if t.IsPtr() && off == rttype.PtrType.OffsetOf("Elem") {
elemSym := reflectdata.TypeLinksym(t.Elem())
reflectdata.MarkTypeSymUsedInInterface(elemSym, f.fe.Func().Linksym())
v.reset(OpAddr)
v.Aux = symToAux(elemSym)
v.AddArg(sb)
return v
}

base.Fatalf("fixedLoad data not known for %s:%d", sym, off)
}

base.Fatalf("fixedLoad data not known for %s:%d", sym, off)
return nil
}
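Note: isFixedLoad/rewriteFixedLoad fold loads from read-only type descriptors into constants at compile time. A toy illustration of the same idea (the descriptor fields and offsets below are made up for the example, not the real abi.Type layout):

package main

import "fmt"

// Toy "type descriptor": a read-only record whose fields are known
// at compile time, so a load at a known offset can become a constant.
type typeInfo struct {
	size     int64 // analogous to abi.Type.Size_
	ptrBytes int64 // analogous to abi.Type.PtrBytes
}

// resolveLoad stands in for rewriteFixedLoad: given the descriptor and
// a field offset, return the constant the load would produce.
func resolveLoad(ti typeInfo, off int64) (int64, bool) {
	switch off {
	case 0:
		return ti.size, true
	case 8:
		return ti.ptrBytes, true
	}
	return 0, false // unknown field: leave the load alone
}

func main() {
	ti := typeInfo{size: 24, ptrBytes: 8}
	if c, ok := resolveLoad(ti, 0); ok {
		fmt.Println("load folded to constant:", c)
	}
}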
@ -296,9 +296,11 @@ func rewriteValueLOONG64(v *Value) bool {
v.Op = OpLOONG64LoweredGetClosurePtr
return true
case OpHmul32:
return rewriteValueLOONG64_OpHmul32(v)
v.Op = OpLOONG64MULH
return true
case OpHmul32u:
return rewriteValueLOONG64_OpHmul32u(v)
v.Op = OpLOONG64MULHU
return true
case OpHmul64:
v.Op = OpLOONG64MULHV
return true

@ -322,6 +324,8 @@ func rewriteValueLOONG64(v *Value) bool {
return rewriteValueLOONG64_OpLOONG64ADDV(v)
case OpLOONG64ADDVconst:
return rewriteValueLOONG64_OpLOONG64ADDVconst(v)
case OpLOONG64ADDshiftLLV:
return rewriteValueLOONG64_OpLOONG64ADDshiftLLV(v)
case OpLOONG64AND:
return rewriteValueLOONG64_OpLOONG64AND(v)
case OpLOONG64ANDconst:

@ -1576,50 +1580,6 @@ func rewriteValueLOONG64_OpEqPtr(v *Value) bool {
return true
}
}
func rewriteValueLOONG64_OpHmul32(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
// match: (Hmul32 x y)
// result: (SRAVconst (MULV (SignExt32to64 x) (SignExt32to64 y)) [32])
for {
x := v_0
y := v_1
v.reset(OpLOONG64SRAVconst)
v.AuxInt = int64ToAuxInt(32)
v0 := b.NewValue0(v.Pos, OpLOONG64MULV, typ.Int64)
v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v1.AddArg(x)
v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v2.AddArg(y)
v0.AddArg2(v1, v2)
v.AddArg(v0)
return true
}
}
func rewriteValueLOONG64_OpHmul32u(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
// match: (Hmul32u x y)
// result: (SRLVconst (MULV (ZeroExt32to64 x) (ZeroExt32to64 y)) [32])
for {
x := v_0
y := v_1
v.reset(OpLOONG64SRLVconst)
v.AuxInt = int64ToAuxInt(32)
v0 := b.NewValue0(v.Pos, OpLOONG64MULV, typ.Int64)
v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v1.AddArg(x)
v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v2.AddArg(y)
v0.AddArg2(v1, v2)
v.AddArg(v0)
return true
}
}
func rewriteValueLOONG64_OpIsInBounds(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]

@ -2050,6 +2010,43 @@ func rewriteValueLOONG64_OpLOONG64ADDVconst(v *Value) bool {
v.AddArg(x)
return true
}
// match: (ADDVconst [c] x)
// cond: is32Bit(c) && c&0xffff == 0 && c != 0
// result: (ADDV16const [c] x)
for {
c := auxIntToInt64(v.AuxInt)
x := v_0
if !(is32Bit(c) && c&0xffff == 0 && c != 0) {
break
}
v.reset(OpLOONG64ADDV16const)
v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
return false
}
func rewriteValueLOONG64_OpLOONG64ADDshiftLLV(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ADDshiftLLV x (MOVVconst [c]) [d])
// cond: is12Bit(c<<d)
// result: (ADDVconst x [c<<d])
for {
d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpLOONG64MOVVconst {
break
}
c := auxIntToInt64(v_1.AuxInt)
if !(is12Bit(c << d)) {
break
}
v.reset(OpLOONG64ADDVconst)
v.AuxInt = int64ToAuxInt(c << d)
v.AddArg(x)
return true
}
return false
}
func rewriteValueLOONG64_OpLOONG64AND(v *Value) bool {
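Note: the new ADDVconst rule above fires only for nonzero 32-bit constants whose low 16 bits are clear (presumably so LOONG64 can use a 16-bit-shifted add-immediate form; that instruction choice is an assumption here). A quick check of the predicate:

package main

import "fmt"

func is32Bit(c int64) bool { return c == int64(int32(c)) }

// qualifies mirrors the rule's condition: a nonzero 32-bit constant
// whose low 16 bits are zero.
func qualifies(c int64) bool {
	return is32Bit(c) && c&0xffff == 0 && c != 0
}

func main() {
	fmt.Println(qualifies(0x10000))    // true: 1<<16, low 16 bits zero
	fmt.Println(qualifies(0x12345))    // false: low bits set
	fmt.Println(qualifies(0x12340000)) // true
	fmt.Println(qualifies(1 << 40))    // false: not a 32-bit value
}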
@ -2371,6 +2368,23 @@ func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
// match: (MOVBUload [off] {sym} ptr (MOVBstore [off] {sym} ptr x _))
// result: (MOVBUreg x)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpLOONG64MOVBstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
x := v_1.Args[1]
if ptr != v_1.Args[0] {
break
}
v.reset(OpLOONG64MOVBUreg)
v.AddArg(x)
return true
}
// match: (MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVBUload [off1+int32(off2)] {sym} ptr mem)

@ -2433,6 +2447,19 @@ func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool {
v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVBUload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVVconst [int64(read8(sym, int64(off)))])
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpLOONG64MOVVconst)
v.AuxInt = int64ToAuxInt(int64(read8(sym, int64(off))))
return true
}
return false
}
func rewriteValueLOONG64_OpLOONG64MOVBUloadidx(v *Value) bool {

@ -2648,6 +2675,23 @@ func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
// match: (MOVBload [off] {sym} ptr (MOVBstore [off] {sym} ptr x _))
// result: (MOVBreg x)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpLOONG64MOVBstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
x := v_1.Args[1]
if ptr != v_1.Args[0] {
break
}
v.reset(OpLOONG64MOVBreg)
v.AddArg(x)
return true
}
// match: (MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVBload [off1+int32(off2)] {sym} ptr mem)

@ -2710,6 +2754,19 @@ func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool {
v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVBload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVVconst [int64(int8(read8(sym, int64(off))))])
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpLOONG64MOVVconst)
v.AuxInt = int64ToAuxInt(int64(int8(read8(sym, int64(off)))))
return true
}
return false
}
func rewriteValueLOONG64_OpLOONG64MOVBloadidx(v *Value) bool {
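Note: the two constant-folding rules above differ only in extension: MOVBUload zero-extends the byte read from the read-only symbol, while MOVBload sign-extends it. The int64(...) versus int64(int8(...)) conversions carry exactly that difference:

package main

import "fmt"

func main() {
	b := byte(0xff) // a byte read from a read-only symbol
	fmt.Println(int64(b))       // 255: zero-extended, as in MOVBUload
	fmt.Println(int64(int8(b))) // -1: sign-extended, as in MOVBload
}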
@ -3568,6 +3625,23 @@ func rewriteValueLOONG64_OpLOONG64MOVHUload(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
// match: (MOVHUload [off] {sym} ptr (MOVHstore [off] {sym} ptr x _))
// result: (MOVHUreg x)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpLOONG64MOVHstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
x := v_1.Args[1]
if ptr != v_1.Args[0] {
break
}
v.reset(OpLOONG64MOVHUreg)
v.AddArg(x)
return true
}
// match: (MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVHUload [off1+int32(off2)] {sym} ptr mem)

@ -3630,6 +3704,19 @@ func rewriteValueLOONG64_OpLOONG64MOVHUload(v *Value) bool {
v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVHUload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVVconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpLOONG64MOVVconst)
v.AuxInt = int64ToAuxInt(int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
return true
}
return false
}
func rewriteValueLOONG64_OpLOONG64MOVHUloadidx(v *Value) bool {

@ -3807,6 +3894,23 @@ func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
// match: (MOVHload [off] {sym} ptr (MOVHstore [off] {sym} ptr x _))
// result: (MOVHreg x)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpLOONG64MOVHstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
x := v_1.Args[1]
if ptr != v_1.Args[0] {
break
}
v.reset(OpLOONG64MOVHreg)
v.AddArg(x)
return true
}
// match: (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVHload [off1+int32(off2)] {sym} ptr mem)

@ -3869,6 +3973,19 @@ func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool {
v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVHload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVVconst [int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpLOONG64MOVVconst)
v.AuxInt = int64ToAuxInt(int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))))
return true
}
return false
}
func rewriteValueLOONG64_OpLOONG64MOVHloadidx(v *Value) bool {

@ -4250,6 +4367,23 @@ func rewriteValueLOONG64_OpLOONG64MOVVload(v *Value) bool {
v.AddArg(val)
return true
}
// match: (MOVVload [off] {sym} ptr (MOVVstore [off] {sym} ptr x _))
// result: (MOVVreg x)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpLOONG64MOVVstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
x := v_1.Args[1]
if ptr != v_1.Args[0] {
break
}
v.reset(OpLOONG64MOVVreg)
v.AddArg(x)
return true
}
// match: (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVVload [off1+int32(off2)] {sym} ptr mem)

@ -4312,6 +4446,19 @@ func rewriteValueLOONG64_OpLOONG64MOVVload(v *Value) bool {
v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVVload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVVconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpLOONG64MOVVconst)
v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
return true
}
return false
}
func rewriteValueLOONG64_OpLOONG64MOVVloadidx(v *Value) bool {

@ -4558,6 +4705,23 @@ func rewriteValueLOONG64_OpLOONG64MOVWUload(v *Value) bool {
v.AddArg(v0)
return true
}
// match: (MOVWUload [off] {sym} ptr (MOVWstore [off] {sym} ptr x _))
// result: (MOVWUreg x)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpLOONG64MOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
x := v_1.Args[1]
if ptr != v_1.Args[0] {
break
}
v.reset(OpLOONG64MOVWUreg)
v.AddArg(x)
return true
}
// match: (MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVWUload [off1+int32(off2)] {sym} ptr mem)

@ -4620,6 +4784,19 @@ func rewriteValueLOONG64_OpLOONG64MOVWUload(v *Value) bool {
v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWUload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVVconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpLOONG64MOVVconst)
v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
return true
}
return false
}
func rewriteValueLOONG64_OpLOONG64MOVWUloadidx(v *Value) bool {

@ -4830,6 +5007,23 @@ func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
// match: (MOVWload [off] {sym} ptr (MOVWstore [off] {sym} ptr x _))
// result: (MOVWreg x)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpLOONG64MOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
break
}
x := v_1.Args[1]
if ptr != v_1.Args[0] {
break
}
v.reset(OpLOONG64MOVWreg)
v.AddArg(x)
return true
}
// match: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVWload [off1+int32(off2)] {sym} ptr mem)

@ -4892,6 +5086,19 @@ func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool {
v.AddArg3(ptr, idx, mem)
return true
}
// match: (MOVWload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVVconst [int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpLOONG64MOVVconst)
v.AuxInt = int64ToAuxInt(int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))))
return true
}
return false
}
func rewriteValueLOONG64_OpLOONG64MOVWloadidx(v *Value) bool {
@ -9056,62 +9263,35 @@ func rewriteValueLOONG64_OpMove(v *Value) bool {
return true
}
// match: (Move [s] dst src mem)
// cond: s%8 != 0 && s > 16
// result: (Move [s%8] (OffPtr <dst.Type> dst [s-s%8]) (OffPtr <src.Type> src [s-s%8]) (Move [s-s%8] dst src mem))
// cond: s > 16 && s < 192 && logLargeCopy(v, s)
// result: (LoweredMove [s] dst src mem)
for {
s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
if !(s%8 != 0 && s > 16) {
if !(s > 16 && s < 192 && logLargeCopy(v, s)) {
break
}
v.reset(OpMove)
v.AuxInt = int64ToAuxInt(s % 8)
v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
v0.AuxInt = int64ToAuxInt(s - s%8)
v0.AddArg(dst)
v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
v1.AuxInt = int64ToAuxInt(s - s%8)
v1.AddArg(src)
v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
v2.AuxInt = int64ToAuxInt(s - s%8)
v2.AddArg3(dst, src, mem)
v.AddArg3(v0, v1, v2)
return true
}
// match: (Move [s] dst src mem)
// cond: s%8 == 0 && s > 16 && s <= 8*128 && logLargeCopy(v, s)
// result: (DUFFCOPY [16 * (128 - s/8)] dst src mem)
for {
s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
if !(s%8 == 0 && s > 16 && s <= 8*128 && logLargeCopy(v, s)) {
break
}
v.reset(OpLOONG64DUFFCOPY)
v.AuxInt = int64ToAuxInt(16 * (128 - s/8))
v.reset(OpLOONG64LoweredMove)
v.AuxInt = int64ToAuxInt(s)
v.AddArg3(dst, src, mem)
return true
}
// match: (Move [s] dst src mem)
// cond: s%8 == 0 && s > 1024 && logLargeCopy(v, s)
// result: (LoweredMove dst src (ADDVconst <src.Type> src [s-8]) mem)
// cond: s >= 192 && logLargeCopy(v, s)
// result: (LoweredMoveLoop [s] dst src mem)
for {
s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
if !(s%8 == 0 && s > 1024 && logLargeCopy(v, s)) {
if !(s >= 192 && logLargeCopy(v, s)) {
break
}
v.reset(OpLOONG64LoweredMove)
v0 := b.NewValue0(v.Pos, OpLOONG64ADDVconst, src.Type)
v0.AuxInt = int64ToAuxInt(s - 8)
v0.AddArg(src)
v.AddArg4(dst, src, v0, mem)
v.reset(OpLOONG64LoweredMoveLoop)
v.AuxInt = int64ToAuxInt(s)
v.AddArg3(dst, src, mem)
return true
}
return false
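Note: after this change LOONG64 picks a Move lowering purely by size: small moves stay inlined, mid-sized ones use unrolled LoweredMove, and anything 192 bytes or larger uses LoweredMoveLoop. A sketch of the dispatch (the inline case for s <= 16 is handled by earlier rules not shown here):

package main

import "fmt"

// lowerMove mirrors the rewrite conditions above (sizes in bytes).
func lowerMove(s int64) string {
	switch {
	case s <= 16:
		return "inline loads/stores" // earlier, smaller rules
	case s < 192:
		return "LoweredMove" // straight-line unrolled copy
	default:
		return "LoweredMoveLoop" // a loop, to bound code size
	}
}

func main() {
	for _, s := range []int64{8, 64, 192, 4096} {
		fmt.Println(s, "->", lowerMove(s))
	}
}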
@ -332,8 +332,6 @@ func rewriteValueMIPS64(v *Value) bool {
return rewriteValueMIPS64_OpMIPS64MOVBreg(v)
case OpMIPS64MOVBstore:
return rewriteValueMIPS64_OpMIPS64MOVBstore(v)
case OpMIPS64MOVBstorezero:
return rewriteValueMIPS64_OpMIPS64MOVBstorezero(v)
case OpMIPS64MOVDload:
return rewriteValueMIPS64_OpMIPS64MOVDload(v)
case OpMIPS64MOVDstore:

@ -352,8 +350,6 @@ func rewriteValueMIPS64(v *Value) bool {
return rewriteValueMIPS64_OpMIPS64MOVHreg(v)
case OpMIPS64MOVHstore:
return rewriteValueMIPS64_OpMIPS64MOVHstore(v)
case OpMIPS64MOVHstorezero:
return rewriteValueMIPS64_OpMIPS64MOVHstorezero(v)
case OpMIPS64MOVVload:
return rewriteValueMIPS64_OpMIPS64MOVVload(v)
case OpMIPS64MOVVnop:

@ -362,8 +358,6 @@ func rewriteValueMIPS64(v *Value) bool {
return rewriteValueMIPS64_OpMIPS64MOVVreg(v)
case OpMIPS64MOVVstore:
return rewriteValueMIPS64_OpMIPS64MOVVstore(v)
case OpMIPS64MOVVstorezero:
return rewriteValueMIPS64_OpMIPS64MOVVstorezero(v)
case OpMIPS64MOVWUload:
return rewriteValueMIPS64_OpMIPS64MOVWUload(v)
case OpMIPS64MOVWUreg:

@ -374,8 +368,6 @@ func rewriteValueMIPS64(v *Value) bool {
return rewriteValueMIPS64_OpMIPS64MOVWreg(v)
case OpMIPS64MOVWstore:
return rewriteValueMIPS64_OpMIPS64MOVWstore(v)
case OpMIPS64MOVWstorezero:
return rewriteValueMIPS64_OpMIPS64MOVWstorezero(v)
case OpMIPS64NEGV:
return rewriteValueMIPS64_OpMIPS64NEGV(v)
case OpMIPS64NOR:

@ -3095,22 +3087,6 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool {
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVVconst [0]) mem)
// result: (MOVBstorezero [off] {sym} ptr mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
break
}
mem := v_2
v.reset(OpMIPS64MOVBstorezero)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)
for {

@ -3215,56 +3191,6 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool {
}
return false
}
func rewriteValueMIPS64_OpMIPS64MOVBstorezero(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
// match: (MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpMIPS64ADDVconst {
break
}
off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(OpMIPS64MOVBstorezero)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
if v_0.Op != OpMIPS64MOVVaddr {
break
}
off2 := auxIntToInt32(v_0.AuxInt)
sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(OpMIPS64MOVBstorezero)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
return false
}
func rewriteValueMIPS64_OpMIPS64MOVDload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]

@ -3856,22 +3782,6 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool {
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVHstore [off] {sym} ptr (MOVVconst [0]) mem)
// result: (MOVHstorezero [off] {sym} ptr mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
break
}
mem := v_2
v.reset(OpMIPS64MOVHstorezero)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
// result: (MOVHstore [off] {sym} ptr x mem)
for {

@ -3942,56 +3852,6 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool {
}
return false
}
func rewriteValueMIPS64_OpMIPS64MOVHstorezero(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
// match: (MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpMIPS64ADDVconst {
break
}
off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(OpMIPS64MOVHstorezero)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
if v_0.Op != OpMIPS64MOVVaddr {
break
}
off2 := auxIntToInt32(v_0.AuxInt)
sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(OpMIPS64MOVHstorezero)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
return false
}
func rewriteValueMIPS64_OpMIPS64MOVVload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]

@ -4182,72 +4042,6 @@ func rewriteValueMIPS64_OpMIPS64MOVVstore(v *Value) bool {
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVVstore [off] {sym} ptr (MOVVconst [0]) mem)
// result: (MOVVstorezero [off] {sym} ptr mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
break
}
mem := v_2
v.reset(OpMIPS64MOVVstorezero)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
return false
}
func rewriteValueMIPS64_OpMIPS64MOVVstorezero(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
// match: (MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVVstorezero [off1+int32(off2)] {sym} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpMIPS64ADDVconst {
break
}
off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(OpMIPS64MOVVstorezero)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
if v_0.Op != OpMIPS64MOVVaddr {
break
}
off2 := auxIntToInt32(v_0.AuxInt)
sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(OpMIPS64MOVVstorezero)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
return false
}
func rewriteValueMIPS64_OpMIPS64MOVWUload(v *Value) bool {

@ -4659,22 +4453,6 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool {
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVWstore [off] {sym} ptr (MOVVconst [0]) mem)
// result: (MOVWstorezero [off] {sym} ptr mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
break
}
mem := v_2
v.reset(OpMIPS64MOVWstorezero)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
// result: (MOVWstore [off] {sym} ptr x mem)
for {

@ -4711,56 +4489,6 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool {
}
return false
}
func rewriteValueMIPS64_OpMIPS64MOVWstorezero(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
// match: (MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpMIPS64ADDVconst {
break
}
off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(OpMIPS64MOVWstorezero)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
if v_0.Op != OpMIPS64MOVVaddr {
break
}
off2 := auxIntToInt32(v_0.AuxInt)
sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
v.reset(OpMIPS64MOVWstorezero)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
return false
}
func rewriteValueMIPS64_OpMIPS64NEGV(v *Value) bool {
v_0 := v.Args[0]
// match: (NEGV (SUBV x y))
src/cmd/compile/internal/ssa/rewriteMIPS64latelower.go (new file, 26 lines)

@ -0,0 +1,26 @@
// Code generated from _gen/MIPS64latelower.rules using 'go generate'; DO NOT EDIT.

package ssa

func rewriteValueMIPS64latelower(v *Value) bool {
switch v.Op {
case OpMIPS64MOVVconst:
return rewriteValueMIPS64latelower_OpMIPS64MOVVconst(v)
}
return false
}
func rewriteValueMIPS64latelower_OpMIPS64MOVVconst(v *Value) bool {
// match: (MOVVconst [0])
// result: (ZERO)
for {
if auxIntToInt64(v.AuxInt) != 0 {
break
}
v.reset(OpMIPS64ZERO)
return true
}
return false
}
func rewriteBlockMIPS64latelower(b *Block) bool {
return false
}
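Note: the new late-lowering pass is tiny: any MOVVconst with value 0 becomes the ZERO op, so zero is read from MIPS64's hard-wired $zero register instead of being materialized. The same shape as a toy rewrite over a simplified value type:

package main

import "fmt"

// Toy version of the latelower rule: fold zero constants into a
// dedicated "read the zero register" op.
type value struct {
	op     string
	auxInt int64
}

func lower(v *value) bool {
	if v.op == "MOVVconst" && v.auxInt == 0 {
		v.op = "ZERO" // no immediate needed; $zero always reads 0
		return true
	}
	return false
}

func main() {
	v := &value{op: "MOVVconst", auxInt: 0}
	fmt.Println(lower(v), v.op) // true ZERO
}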
@ -3090,169 +3090,38 @@ func rewriteValueRISCV64_OpMove(v *Value) bool {
|
|||
v.AddArg3(dst, v0, v1)
|
||||
return true
|
||||
}
|
||||
// match: (Move [12] {t} dst src mem)
|
||||
// cond: t.Alignment()%4 == 0
|
||||
// result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)))
|
||||
for {
|
||||
if auxIntToInt64(v.AuxInt) != 12 {
|
||||
break
|
||||
}
|
||||
t := auxToType(v.Aux)
|
||||
dst := v_0
|
||||
src := v_1
|
||||
mem := v_2
|
||||
if !(t.Alignment()%4 == 0) {
|
||||
break
|
||||
}
|
||||
v.reset(OpRISCV64MOVWstore)
|
||||
v.AuxInt = int32ToAuxInt(8)
|
||||
v0 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
|
||||
v0.AuxInt = int32ToAuxInt(8)
|
||||
v0.AddArg2(src, mem)
|
||||
v1 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
|
||||
v1.AuxInt = int32ToAuxInt(4)
|
||||
v2 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
|
||||
v2.AuxInt = int32ToAuxInt(4)
|
||||
v2.AddArg2(src, mem)
|
||||
v3 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
|
||||
v4 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
|
||||
v4.AddArg2(src, mem)
|
||||
v3.AddArg3(dst, v4, mem)
|
||||
v1.AddArg3(dst, v2, v3)
|
||||
v.AddArg3(dst, v0, v1)
|
||||
return true
|
||||
}
|
||||
// match: (Move [16] {t} dst src mem)
|
||||
// cond: t.Alignment()%8 == 0
|
||||
// result: (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))
|
||||
for {
|
||||
if auxIntToInt64(v.AuxInt) != 16 {
|
||||
break
|
||||
}
|
||||
t := auxToType(v.Aux)
|
||||
dst := v_0
|
||||
src := v_1
|
||||
mem := v_2
|
||||
if !(t.Alignment()%8 == 0) {
|
||||
break
|
||||
}
|
||||
v.reset(OpRISCV64MOVDstore)
|
||||
v.AuxInt = int32ToAuxInt(8)
|
||||
v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
|
||||
v0.AuxInt = int32ToAuxInt(8)
|
||||
v0.AddArg2(src, mem)
|
||||
v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
|
||||
v2 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
|
||||
v2.AddArg2(src, mem)
|
||||
v1.AddArg3(dst, v2, mem)
|
||||
v.AddArg3(dst, v0, v1)
|
||||
return true
|
||||
}
|
||||
// match: (Move [24] {t} dst src mem)
|
||||
// cond: t.Alignment()%8 == 0
|
||||
// result: (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)))
|
||||
for {
|
||||
if auxIntToInt64(v.AuxInt) != 24 {
|
||||
break
|
||||
}
		t := auxToType(v.Aux)
		dst := v_0
		src := v_1
		mem := v_2
		if !(t.Alignment()%8 == 0) {
			break
		}
		v.reset(OpRISCV64MOVDstore)
		v.AuxInt = int32ToAuxInt(16)
		v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
		v0.AuxInt = int32ToAuxInt(16)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
		v1.AuxInt = int32ToAuxInt(8)
		v2 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
		v2.AuxInt = int32ToAuxInt(8)
		v2.AddArg2(src, mem)
		v3 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
		v4 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
		v4.AddArg2(src, mem)
		v3.AddArg3(dst, v4, mem)
		v1.AddArg3(dst, v2, v3)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [32] {t} dst src mem)
	// cond: t.Alignment()%8 == 0
	// result: (MOVDstore [24] dst (MOVDload [24] src mem) (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))))
	for {
		if auxIntToInt64(v.AuxInt) != 32 {
			break
		}
		t := auxToType(v.Aux)
		dst := v_0
		src := v_1
		mem := v_2
		if !(t.Alignment()%8 == 0) {
			break
		}
		v.reset(OpRISCV64MOVDstore)
		v.AuxInt = int32ToAuxInt(24)
		v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
		v0.AuxInt = int32ToAuxInt(24)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
		v1.AuxInt = int32ToAuxInt(16)
		v2 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
		v2.AuxInt = int32ToAuxInt(16)
		v2.AddArg2(src, mem)
		v3 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
		v3.AuxInt = int32ToAuxInt(8)
		v4 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
		v4.AuxInt = int32ToAuxInt(8)
		v4.AddArg2(src, mem)
		v5 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
		v6 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
		v6.AddArg2(src, mem)
		v5.AddArg3(dst, v6, mem)
		v3.AddArg3(dst, v4, v5)
		v1.AddArg3(dst, v2, v3)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [s] {t} dst src mem)
-	// cond: s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && logLargeCopy(v, s)
-	// result: (DUFFCOPY [16 * (128 - s/8)] dst src mem)
+	// cond: s > 0 && s <= 3*8*moveSize(t.Alignment(), config) && logLargeCopy(v, s)
+	// result: (LoweredMove [makeValAndOff(int32(s),int32(t.Alignment()))] dst src mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		t := auxToType(v.Aux)
		dst := v_0
		src := v_1
		mem := v_2
-		if !(s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && logLargeCopy(v, s)) {
+		if !(s > 0 && s <= 3*8*moveSize(t.Alignment(), config) && logLargeCopy(v, s)) {
			break
		}
-		v.reset(OpRISCV64DUFFCOPY)
-		v.AuxInt = int64ToAuxInt(16 * (128 - s/8))
+		v.reset(OpRISCV64LoweredMove)
+		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s), int32(t.Alignment())))
		v.AddArg3(dst, src, mem)
		return true
	}
	// match: (Move [s] {t} dst src mem)
-	// cond: (s <= 16 || logLargeCopy(v, s))
-	// result: (LoweredMove [t.Alignment()] dst src (ADDI <src.Type> [s-moveSize(t.Alignment(), config)] src) mem)
+	// cond: s > 3*8*moveSize(t.Alignment(), config) && logLargeCopy(v, s)
+	// result: (LoweredMoveLoop [makeValAndOff(int32(s),int32(t.Alignment()))] dst src mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		t := auxToType(v.Aux)
		dst := v_0
		src := v_1
		mem := v_2
-		if !(s <= 16 || logLargeCopy(v, s)) {
+		if !(s > 3*8*moveSize(t.Alignment(), config) && logLargeCopy(v, s)) {
			break
		}
-		v.reset(OpRISCV64LoweredMove)
-		v.AuxInt = int64ToAuxInt(t.Alignment())
-		v0 := b.NewValue0(v.Pos, OpRISCV64ADDI, src.Type)
-		v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config))
-		v0.AddArg(src)
-		v.AddArg4(dst, src, v0, mem)
+		v.reset(OpRISCV64LoweredMoveLoop)
+		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s), int32(t.Alignment())))
+		v.AddArg3(dst, src, mem)
		return true
	}
	return false
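Taken together, the Move rules above encode a size ladder: small aligned copies are fully unrolled, mid-sized ones become an inline LoweredMove, and everything larger becomes a generated loop (LoweredMoveLoop) where a Duff's-device call used to be emitted. A minimal sketch of that selection, assuming unit stands in for moveSize(t.Alignment(), config), i.e. the widest legal store width (names here are illustrative, not the compiler's API):

	// Sketch only: mirrors the conditions in the rules above.
	func moveStrategy(s, unit int64) string {
		switch {
		case s <= 32: // and 8-byte aligned, per the rules' conds
			return "unrolled MOVDstore/MOVDload pairs"
		case s <= 3*8*unit:
			return "LoweredMove" // short inline copy sequence
		default:
			return "LoweredMoveLoop" // generated loop, replaces DUFFCOPY
		}
	}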
@@ -4740,6 +4609,25 @@ func rewriteValueRISCV64_OpRISCV64MOVBUload(v *Value) bool {
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVBUload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _))
	// cond: isSamePtr(ptr1, ptr2)
	// result: (MOVBUreg x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr1 := v_0
		if v_1.Op != OpRISCV64MOVBstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(isSamePtr(ptr1, ptr2)) {
			break
		}
		v.reset(OpRISCV64MOVBUreg)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueRISCV64_OpRISCV64MOVBUreg(v *Value) bool {
@@ -5049,6 +4937,25 @@ func rewriteValueRISCV64_OpRISCV64MOVBload(v *Value) bool {
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVBload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _))
	// cond: isSamePtr(ptr1, ptr2)
	// result: (MOVBreg x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr1 := v_0
		if v_1.Op != OpRISCV64MOVBstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(isSamePtr(ptr1, ptr2)) {
			break
		}
		v.reset(OpRISCV64MOVBreg)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueRISCV64_OpRISCV64MOVBreg(v *Value) bool {
@@ -5397,6 +5304,25 @@ func rewriteValueRISCV64_OpRISCV64MOVDload(v *Value) bool {
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _))
	// cond: isSamePtr(ptr1, ptr2)
	// result: (MOVDreg x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr1 := v_0
		if v_1.Op != OpRISCV64MOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(isSamePtr(ptr1, ptr2)) {
			break
		}
		v.reset(OpRISCV64MOVDreg)
		v.AddArg(x)
		return true
	}
	// match: (MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _))
	// cond: isSamePtr(ptr1, ptr2)
	// result: (FMVXD x)
@@ -5616,6 +5542,25 @@ func rewriteValueRISCV64_OpRISCV64MOVHUload(v *Value) bool {
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVHUload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _))
	// cond: isSamePtr(ptr1, ptr2)
	// result: (MOVHUreg x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr1 := v_0
		if v_1.Op != OpRISCV64MOVHstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(isSamePtr(ptr1, ptr2)) {
			break
		}
		v.reset(OpRISCV64MOVHUreg)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueRISCV64_OpRISCV64MOVHUreg(v *Value) bool {
@@ -5782,6 +5727,25 @@ func rewriteValueRISCV64_OpRISCV64MOVHload(v *Value) bool {
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVHload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _))
	// cond: isSamePtr(ptr1, ptr2)
	// result: (MOVHreg x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr1 := v_0
		if v_1.Op != OpRISCV64MOVHstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(isSamePtr(ptr1, ptr2)) {
			break
		}
		v.reset(OpRISCV64MOVHreg)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueRISCV64_OpRISCV64MOVHreg(v *Value) bool {
@@ -6141,6 +6105,25 @@ func rewriteValueRISCV64_OpRISCV64MOVWUload(v *Value) bool {
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVWUload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _))
	// cond: isSamePtr(ptr1, ptr2)
	// result: (MOVWUreg x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr1 := v_0
		if v_1.Op != OpRISCV64MOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(isSamePtr(ptr1, ptr2)) {
			break
		}
		v.reset(OpRISCV64MOVWUreg)
		v.AddArg(x)
		return true
	}
	// match: (MOVWUload [off] {sym} ptr1 (FMOVWstore [off] {sym} ptr2 x _))
	// cond: isSamePtr(ptr1, ptr2)
	// result: (MOVWUreg (FMVXS x))
@@ -6352,6 +6335,25 @@ func rewriteValueRISCV64_OpRISCV64MOVWload(v *Value) bool {
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVWload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _))
	// cond: isSamePtr(ptr1, ptr2)
	// result: (MOVWreg x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr1 := v_0
		if v_1.Op != OpRISCV64MOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(isSamePtr(ptr1, ptr2)) {
			break
		}
		v.reset(OpRISCV64MOVWreg)
		v.AddArg(x)
		return true
	}
	// match: (MOVWload [off] {sym} ptr1 (FMOVWstore [off] {sym} ptr2 x _))
	// cond: isSamePtr(ptr1, ptr2)
	// result: (FMVXS x)
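Each of the hunks above adds the same store-to-load forwarding pattern for one width (B, H, W, D, signed and unsigned). At the source level the effect is roughly the following (a sketch, not compiler code):

	// A byte stored and immediately reloaded through the same pointer:
	// the MOVBUload is folded to MOVBUreg of the stored value, so only
	// the zero-extension remains and the memory load is elided.
	func roundTrip(p *byte, b byte) byte {
		*p = b    // MOVBstore
		return *p // rewritten to MOVBUreg b
	}

The FMOVDstore/FMOVWstore variants do the same across register files, turning a float store followed by an integer reload into a direct FMVXD/FMVXS register move.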
@@ -9792,138 +9794,39 @@ func rewriteValueRISCV64_OpZero(v *Value) bool {
		v.AddArg3(ptr, v0, v1)
		return true
	}
	// match: (Zero [12] {t} ptr mem)
	// cond: t.Alignment()%4 == 0
	// result: (MOVWstore [8] ptr (MOVDconst [0]) (MOVWstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem)))
	for {
		if auxIntToInt64(v.AuxInt) != 12 {
			break
		}
		t := auxToType(v.Aux)
		ptr := v_0
		mem := v_1
		if !(t.Alignment()%4 == 0) {
			break
		}
		v.reset(OpRISCV64MOVWstore)
		v.AuxInt = int32ToAuxInt(8)
		v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(0)
		v1 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
		v1.AuxInt = int32ToAuxInt(4)
		v2 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
		v2.AddArg3(ptr, v0, mem)
		v1.AddArg3(ptr, v0, v2)
		v.AddArg3(ptr, v0, v1)
		return true
	}
	// match: (Zero [16] {t} ptr mem)
	// cond: t.Alignment()%8 == 0
	// result: (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 16 {
			break
		}
		t := auxToType(v.Aux)
		ptr := v_0
		mem := v_1
		if !(t.Alignment()%8 == 0) {
			break
		}
		v.reset(OpRISCV64MOVDstore)
		v.AuxInt = int32ToAuxInt(8)
		v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(0)
		v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
		v1.AddArg3(ptr, v0, mem)
		v.AddArg3(ptr, v0, v1)
		return true
	}
	// match: (Zero [24] {t} ptr mem)
	// cond: t.Alignment()%8 == 0
	// result: (MOVDstore [16] ptr (MOVDconst [0]) (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)))
	for {
		if auxIntToInt64(v.AuxInt) != 24 {
			break
		}
		t := auxToType(v.Aux)
		ptr := v_0
		mem := v_1
		if !(t.Alignment()%8 == 0) {
			break
		}
		v.reset(OpRISCV64MOVDstore)
		v.AuxInt = int32ToAuxInt(16)
		v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(0)
		v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
		v1.AuxInt = int32ToAuxInt(8)
		v2 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
		v2.AddArg3(ptr, v0, mem)
		v1.AddArg3(ptr, v0, v2)
		v.AddArg3(ptr, v0, v1)
		return true
	}
	// match: (Zero [32] {t} ptr mem)
	// cond: t.Alignment()%8 == 0
	// result: (MOVDstore [24] ptr (MOVDconst [0]) (MOVDstore [16] ptr (MOVDconst [0]) (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))))
	for {
		if auxIntToInt64(v.AuxInt) != 32 {
			break
		}
		t := auxToType(v.Aux)
		ptr := v_0
		mem := v_1
		if !(t.Alignment()%8 == 0) {
			break
		}
		v.reset(OpRISCV64MOVDstore)
		v.AuxInt = int32ToAuxInt(24)
		v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(0)
		v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
		v1.AuxInt = int32ToAuxInt(16)
		v2 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
		v2.AuxInt = int32ToAuxInt(8)
		v3 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
		v3.AddArg3(ptr, v0, mem)
		v2.AddArg3(ptr, v0, v3)
		v1.AddArg3(ptr, v0, v2)
		v.AddArg3(ptr, v0, v1)
		return true
	}
	// match: (Zero [s] {t} ptr mem)
-	// cond: s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0
-	// result: (DUFFZERO [8 * (128 - s/8)] ptr mem)
+	// cond: s <= 24*moveSize(t.Alignment(), config)
+	// result: (LoweredZero [makeValAndOff(int32(s),int32(t.Alignment()))] ptr mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		t := auxToType(v.Aux)
		ptr := v_0
		mem := v_1
-		if !(s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0) {
+		if !(s <= 24*moveSize(t.Alignment(), config)) {
			break
		}
-		v.reset(OpRISCV64DUFFZERO)
-		v.AuxInt = int64ToAuxInt(8 * (128 - s/8))
+		v.reset(OpRISCV64LoweredZero)
+		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s), int32(t.Alignment())))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Zero [s] {t} ptr mem)
-	// result: (LoweredZero [t.Alignment()] ptr (ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.Alignment(), config)])) mem)
+	// cond: s > 24*moveSize(t.Alignment(), config)
+	// result: (LoweredZeroLoop [makeValAndOff(int32(s),int32(t.Alignment()))] ptr mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		t := auxToType(v.Aux)
		ptr := v_0
		mem := v_1
-		v.reset(OpRISCV64LoweredZero)
-		v.AuxInt = int64ToAuxInt(t.Alignment())
-		v0 := b.NewValue0(v.Pos, OpRISCV64ADD, ptr.Type)
-		v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
-		v1.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config))
-		v0.AddArg2(ptr, v1)
-		v.AddArg3(ptr, v0, mem)
+		if !(s > 24*moveSize(t.Alignment(), config)) {
+			break
+		}
+		v.reset(OpRISCV64LoweredZeroLoop)
+		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s), int32(t.Alignment())))
+		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteBlockRISCV64(b *Block) bool {
	typ := &b.Func.Config.Types
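As with Move, the Zero lowering now picks between two forms by size rather than emitting a DUFFZERO. A sketch of the cutover, with unit standing in for moveSize(t.Alignment(), config) (illustrative names, not the compiler's API):

	// Sketch only: mirrors the conditions in the rules above.
	func zeroStrategy(s, unit int64) string {
		if s <= 24*unit {
			return "LoweredZero" // short inline store sequence, replaces DUFFZERO
		}
		return "LoweredZeroLoop" // generated loop
	}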
@@ -14089,8 +14089,6 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	typ := &b.Func.Config.Types
	// match: (Load <t1> p1 (Store {t2} p2 x _))
	// cond: isSamePtr(p1, p2) && copyCompatibleType(t1, x.Type) && t1.Size() == t2.Size()
	// result: x
@@ -14675,11 +14673,95 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
		v.AddArg(v0)
		return true
	}
-	// match: (Load <typ.BytePtr> (OffPtr [off] (Addr {s} sb) ) _)
-	// cond: isFixedSym(s, off)
-	// result: (Addr {fixedSym(b.Func, s, off)} sb)
+	// match: (Load (Addr {s} sb) _)
+	// cond: isFixedLoad(v, s, 0)
+	// result: rewriteFixedLoad(v, s, sb, 0)
	for {
-		if v.Type != typ.BytePtr || v_0.Op != OpOffPtr {
+		if v_0.Op != OpAddr {
			break
		}
		s := auxToSym(v_0.Aux)
		sb := v_0.Args[0]
		if !(isFixedLoad(v, s, 0)) {
			break
		}
		v.copyOf(rewriteFixedLoad(v, s, sb, 0))
		return true
	}
	// match: (Load (Convert (Addr {s} sb) _) _)
	// cond: isFixedLoad(v, s, 0)
	// result: rewriteFixedLoad(v, s, sb, 0)
	for {
		if v_0.Op != OpConvert {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAddr {
			break
		}
		s := auxToSym(v_0_0.Aux)
		sb := v_0_0.Args[0]
		if !(isFixedLoad(v, s, 0)) {
			break
		}
		v.copyOf(rewriteFixedLoad(v, s, sb, 0))
		return true
	}
	// match: (Load (ITab (IMake (Addr {s} sb) _)) _)
	// cond: isFixedLoad(v, s, 0)
	// result: rewriteFixedLoad(v, s, sb, 0)
	for {
		if v_0.Op != OpITab {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpIMake {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAddr {
			break
		}
		s := auxToSym(v_0_0_0.Aux)
		sb := v_0_0_0.Args[0]
		if !(isFixedLoad(v, s, 0)) {
			break
		}
		v.copyOf(rewriteFixedLoad(v, s, sb, 0))
		return true
	}
	// match: (Load (ITab (IMake (Convert (Addr {s} sb) _) _)) _)
	// cond: isFixedLoad(v, s, 0)
	// result: rewriteFixedLoad(v, s, sb, 0)
	for {
		if v_0.Op != OpITab {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpIMake {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpConvert {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAddr {
			break
		}
		s := auxToSym(v_0_0_0_0.Aux)
		sb := v_0_0_0_0.Args[0]
		if !(isFixedLoad(v, s, 0)) {
			break
		}
		v.copyOf(rewriteFixedLoad(v, s, sb, 0))
		return true
	}
	// match: (Load (OffPtr [off] (Addr {s} sb) ) _)
	// cond: isFixedLoad(v, s, off)
	// result: rewriteFixedLoad(v, s, sb, off)
	for {
		if v_0.Op != OpOffPtr {
			break
		}
		off := auxIntToInt64(v_0.AuxInt)
@@ -14689,19 +14771,17 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
		}
		s := auxToSym(v_0_0.Aux)
		sb := v_0_0.Args[0]
-		if !(isFixedSym(s, off)) {
+		if !(isFixedLoad(v, s, off)) {
			break
		}
-		v.reset(OpAddr)
-		v.Aux = symToAux(fixedSym(b.Func, s, off))
-		v.AddArg(sb)
+		v.copyOf(rewriteFixedLoad(v, s, sb, off))
		return true
	}
-	// match: (Load <typ.BytePtr> (OffPtr [off] (Convert (Addr {s} sb) _) ) _)
-	// cond: isFixedSym(s, off)
-	// result: (Addr {fixedSym(b.Func, s, off)} sb)
+	// match: (Load (OffPtr [off] (Convert (Addr {s} sb) _) ) _)
+	// cond: isFixedLoad(v, s, off)
+	// result: rewriteFixedLoad(v, s, sb, off)
	for {
-		if v.Type != typ.BytePtr || v_0.Op != OpOffPtr {
+		if v_0.Op != OpOffPtr {
			break
		}
		off := auxIntToInt64(v_0.AuxInt)
@@ -14715,19 +14795,17 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
		}
		s := auxToSym(v_0_0_0.Aux)
		sb := v_0_0_0.Args[0]
-		if !(isFixedSym(s, off)) {
+		if !(isFixedLoad(v, s, off)) {
			break
		}
-		v.reset(OpAddr)
-		v.Aux = symToAux(fixedSym(b.Func, s, off))
-		v.AddArg(sb)
+		v.copyOf(rewriteFixedLoad(v, s, sb, off))
		return true
	}
-	// match: (Load <typ.BytePtr> (OffPtr [off] (ITab (IMake (Addr {s} sb) _))) _)
-	// cond: isFixedSym(s, off)
-	// result: (Addr {fixedSym(b.Func, s, off)} sb)
+	// match: (Load (OffPtr [off] (ITab (IMake (Addr {s} sb) _))) _)
+	// cond: isFixedLoad(v, s, off)
+	// result: rewriteFixedLoad(v, s, sb, off)
	for {
-		if v.Type != typ.BytePtr || v_0.Op != OpOffPtr {
+		if v_0.Op != OpOffPtr {
			break
		}
		off := auxIntToInt64(v_0.AuxInt)
@@ -14745,19 +14823,17 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
		}
		s := auxToSym(v_0_0_0_0.Aux)
		sb := v_0_0_0_0.Args[0]
-		if !(isFixedSym(s, off)) {
+		if !(isFixedLoad(v, s, off)) {
			break
		}
-		v.reset(OpAddr)
-		v.Aux = symToAux(fixedSym(b.Func, s, off))
-		v.AddArg(sb)
+		v.copyOf(rewriteFixedLoad(v, s, sb, off))
		return true
	}
-	// match: (Load <typ.BytePtr> (OffPtr [off] (ITab (IMake (Convert (Addr {s} sb) _) _))) _)
-	// cond: isFixedSym(s, off)
-	// result: (Addr {fixedSym(b.Func, s, off)} sb)
+	// match: (Load (OffPtr [off] (ITab (IMake (Convert (Addr {s} sb) _) _))) _)
+	// cond: isFixedLoad(v, s, off)
+	// result: rewriteFixedLoad(v, s, sb, off)
	for {
-		if v.Type != typ.BytePtr || v_0.Op != OpOffPtr {
+		if v_0.Op != OpOffPtr {
			break
		}
		off := auxIntToInt64(v_0.AuxInt)
@@ -14779,232 +14855,10 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
		}
		s := auxToSym(v_0_0_0_0_0.Aux)
		sb := v_0_0_0_0_0.Args[0]
-		if !(isFixedSym(s, off)) {
+		if !(isFixedLoad(v, s, off)) {
			break
		}
-		v.reset(OpAddr)
-		v.Aux = symToAux(fixedSym(b.Func, s, off))
-		v.AddArg(sb)
		return true
	}
	// match: (Load <typ.Uintptr> (OffPtr [off] (Addr {s} sb) ) _)
	// cond: isFixedSym(s, off)
	// result: (Addr {fixedSym(b.Func, s, off)} sb)
	for {
		if v.Type != typ.Uintptr || v_0.Op != OpOffPtr {
			break
		}
		off := auxIntToInt64(v_0.AuxInt)
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAddr {
			break
		}
		s := auxToSym(v_0_0.Aux)
		sb := v_0_0.Args[0]
		if !(isFixedSym(s, off)) {
			break
		}
		v.reset(OpAddr)
		v.Aux = symToAux(fixedSym(b.Func, s, off))
		v.AddArg(sb)
		return true
	}
	// match: (Load <typ.Uintptr> (OffPtr [off] (Convert (Addr {s} sb) _) ) _)
	// cond: isFixedSym(s, off)
	// result: (Addr {fixedSym(b.Func, s, off)} sb)
	for {
		if v.Type != typ.Uintptr || v_0.Op != OpOffPtr {
			break
		}
		off := auxIntToInt64(v_0.AuxInt)
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpConvert {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAddr {
			break
		}
		s := auxToSym(v_0_0_0.Aux)
		sb := v_0_0_0.Args[0]
		if !(isFixedSym(s, off)) {
			break
		}
		v.reset(OpAddr)
		v.Aux = symToAux(fixedSym(b.Func, s, off))
		v.AddArg(sb)
		return true
	}
	// match: (Load <typ.Uintptr> (OffPtr [off] (ITab (IMake (Addr {s} sb) _))) _)
	// cond: isFixedSym(s, off)
	// result: (Addr {fixedSym(b.Func, s, off)} sb)
	for {
		if v.Type != typ.Uintptr || v_0.Op != OpOffPtr {
			break
		}
		off := auxIntToInt64(v_0.AuxInt)
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpITab {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpIMake {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAddr {
			break
		}
		s := auxToSym(v_0_0_0_0.Aux)
		sb := v_0_0_0_0.Args[0]
		if !(isFixedSym(s, off)) {
			break
		}
		v.reset(OpAddr)
		v.Aux = symToAux(fixedSym(b.Func, s, off))
		v.AddArg(sb)
		return true
	}
	// match: (Load <typ.Uintptr> (OffPtr [off] (ITab (IMake (Convert (Addr {s} sb) _) _))) _)
	// cond: isFixedSym(s, off)
	// result: (Addr {fixedSym(b.Func, s, off)} sb)
	for {
		if v.Type != typ.Uintptr || v_0.Op != OpOffPtr {
			break
		}
		off := auxIntToInt64(v_0.AuxInt)
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpITab {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpIMake {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpConvert {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAddr {
			break
		}
		s := auxToSym(v_0_0_0_0_0.Aux)
		sb := v_0_0_0_0_0.Args[0]
		if !(isFixedSym(s, off)) {
			break
		}
		v.reset(OpAddr)
		v.Aux = symToAux(fixedSym(b.Func, s, off))
		v.AddArg(sb)
		return true
	}
	// match: (Load <t> (OffPtr [off] (Addr {sym} _) ) _)
	// cond: t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)
	// result: (Const32 [fixed32(config, sym, off)])
	for {
		t := v.Type
		if v_0.Op != OpOffPtr {
			break
		}
		off := auxIntToInt64(v_0.AuxInt)
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAddr {
			break
		}
		sym := auxToSym(v_0_0.Aux)
		if !(t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)) {
			break
		}
		v.reset(OpConst32)
		v.AuxInt = int32ToAuxInt(fixed32(config, sym, off))
		return true
	}
	// match: (Load <t> (OffPtr [off] (Convert (Addr {sym} _) _) ) _)
	// cond: t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)
	// result: (Const32 [fixed32(config, sym, off)])
	for {
		t := v.Type
		if v_0.Op != OpOffPtr {
			break
		}
		off := auxIntToInt64(v_0.AuxInt)
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpConvert {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAddr {
			break
		}
		sym := auxToSym(v_0_0_0.Aux)
		if !(t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)) {
			break
		}
		v.reset(OpConst32)
		v.AuxInt = int32ToAuxInt(fixed32(config, sym, off))
		return true
	}
	// match: (Load <t> (OffPtr [off] (ITab (IMake (Addr {sym} _) _))) _)
	// cond: t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)
	// result: (Const32 [fixed32(config, sym, off)])
	for {
		t := v.Type
		if v_0.Op != OpOffPtr {
			break
		}
		off := auxIntToInt64(v_0.AuxInt)
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpITab {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpIMake {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAddr {
			break
		}
		sym := auxToSym(v_0_0_0_0.Aux)
		if !(t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)) {
			break
		}
		v.reset(OpConst32)
		v.AuxInt = int32ToAuxInt(fixed32(config, sym, off))
		return true
	}
	// match: (Load <t> (OffPtr [off] (ITab (IMake (Convert (Addr {sym} _) _) _))) _)
	// cond: t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)
	// result: (Const32 [fixed32(config, sym, off)])
	for {
		t := v.Type
		if v_0.Op != OpOffPtr {
			break
		}
		off := auxIntToInt64(v_0.AuxInt)
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpITab {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpIMake {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpConvert {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAddr {
			break
		}
		sym := auxToSym(v_0_0_0_0_0.Aux)
		if !(t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)) {
			break
		}
-		v.reset(OpConst32)
-		v.AuxInt = int32ToAuxInt(fixed32(config, sym, off))
+		v.copyOf(rewriteFixedLoad(v, s, sb, off))
		return true
	}
	return false
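All of the matchers above, old and new, recognize the same underlying shape: a Load whose address is a constant symbol, possibly wrapped in OffPtr/Convert/ITab/IMake, plus a constant offset. The consolidation replaces the per-type, per-width variants (typ.BytePtr, typ.Uintptr, fixed32) with one isFixedLoad/rewriteFixedLoad pair, which also covers loads of abi.Type fields such as Size_, PtrBytes, and Kind_. A plain-Go analogue of what a "fixed load" means (a sketch under my own naming, not compiler code):

	// Reading from a constant, read-only table at a constant index can
	// be computed at build time; the SSA rules do the same for loads
	// from read-only symbols at fixed offsets.
	var roTable = [4]int32{10, 20, 30, 40} // read-only in spirit

	func fixedLoadAnalogue() int32 {
		const off = 2
		return roTable[off] // foldable: symbol and offset are both constant
	}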
@@ -31117,6 +30971,390 @@ func rewriteValuegeneric_OpStaticLECall(v *Value) bool {
		v.AddArg2(v0, mem)
		return true
	}
	// match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [3]) mem)
	// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)
	// result: (MakeResult (Eq32 (Or32 <typ.Int32> (ZeroExt16to32 <typ.Int32> (Load <typ.Int16> sptr mem)) (Lsh32x32 <typ.Int32> (ZeroExt8to32 <typ.Int32> (Load <typ.Int8> (OffPtr <typ.BytePtr> [2] sptr) mem)) (Const32 <typ.Int32> [16]))) (Const32 <typ.Int32> [int32(uint32(read16(scon,0,config.ctxt.Arch.ByteOrder))|(uint32(read8(scon,2))<<16))])) mem)
	for {
		if len(v.Args) != 4 {
			break
		}
		callAux := auxToCall(v.Aux)
		mem := v.Args[3]
		sptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAddr {
			break
		}
		scon := auxToSym(v_1.Aux)
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpSB {
			break
		}
		v_2 := v.Args[2]
		if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 3 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)) {
			break
		}
		v.reset(OpMakeResult)
		v0 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
		v1 := b.NewValue0(v.Pos, OpOr32, typ.Int32)
		v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.Int32)
		v3 := b.NewValue0(v.Pos, OpLoad, typ.Int16)
		v3.AddArg2(sptr, mem)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpLsh32x32, typ.Int32)
		v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.Int32)
		v6 := b.NewValue0(v.Pos, OpLoad, typ.Int8)
		v7 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtr)
		v7.AuxInt = int64ToAuxInt(2)
		v7.AddArg(sptr)
		v6.AddArg2(v7, mem)
		v5.AddArg(v6)
		v8 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
		v8.AuxInt = int32ToAuxInt(16)
		v4.AddArg2(v5, v8)
		v1.AddArg2(v2, v4)
		v9 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
		v9.AuxInt = int32ToAuxInt(int32(uint32(read16(scon, 0, config.ctxt.Arch.ByteOrder)) | (uint32(read8(scon, 2)) << 16)))
		v0.AddArg2(v1, v9)
		v.AddArg2(v0, mem)
		return true
	}
	// match: (StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [3]) mem)
	// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)
	// result: (MakeResult (Eq32 (Or32 <typ.Int32> (ZeroExt16to32 <typ.Int32> (Load <typ.Int16> sptr mem)) (Lsh32x32 <typ.Int32> (ZeroExt8to32 <typ.Int32> (Load <typ.Int8> (OffPtr <typ.BytePtr> [2] sptr) mem)) (Const32 <typ.Int32> [16]))) (Const32 <typ.Int32> [int32(uint32(read16(scon,0,config.ctxt.Arch.ByteOrder))|(uint32(read8(scon,2))<<16))])) mem)
	for {
		if len(v.Args) != 4 {
			break
		}
		callAux := auxToCall(v.Aux)
		mem := v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAddr {
			break
		}
		scon := auxToSym(v_0.Aux)
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpSB {
			break
		}
		sptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 3 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)) {
			break
		}
		v.reset(OpMakeResult)
		v0 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
		v1 := b.NewValue0(v.Pos, OpOr32, typ.Int32)
		v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.Int32)
		v3 := b.NewValue0(v.Pos, OpLoad, typ.Int16)
		v3.AddArg2(sptr, mem)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpLsh32x32, typ.Int32)
		v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.Int32)
		v6 := b.NewValue0(v.Pos, OpLoad, typ.Int8)
		v7 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtr)
		v7.AuxInt = int64ToAuxInt(2)
		v7.AddArg(sptr)
		v6.AddArg2(v7, mem)
		v5.AddArg(v6)
		v8 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
		v8.AuxInt = int32ToAuxInt(16)
		v4.AddArg2(v5, v8)
		v1.AddArg2(v2, v4)
		v9 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
		v9.AuxInt = int32ToAuxInt(int32(uint32(read16(scon, 0, config.ctxt.Arch.ByteOrder)) | (uint32(read8(scon, 2)) << 16)))
		v0.AddArg2(v1, v9)
		v.AddArg2(v0, mem)
		return true
	}
	// match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [5]) mem)
	// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8
	// result: (MakeResult (Eq64 (Or64 <typ.Int64> (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem)) (Lsh64x64 <typ.Int64> (ZeroExt8to64 <typ.Int64> (Load <typ.Int8> (OffPtr <typ.BytePtr> [4] sptr) mem)) (Const64 <typ.Int64> [32]))) (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read8(scon,4))<<32))])) mem)
	for {
		if len(v.Args) != 4 {
			break
		}
		callAux := auxToCall(v.Aux)
		mem := v.Args[3]
		sptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAddr {
			break
		}
		scon := auxToSym(v_1.Aux)
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpSB {
			break
		}
		v_2 := v.Args[2]
		if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 5 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8) {
			break
		}
		v.reset(OpMakeResult)
		v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool)
		v1 := b.NewValue0(v.Pos, OpOr64, typ.Int64)
		v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.Int64)
		v3 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
		v3.AddArg2(sptr, mem)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpLsh64x64, typ.Int64)
		v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.Int64)
		v6 := b.NewValue0(v.Pos, OpLoad, typ.Int8)
		v7 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtr)
		v7.AuxInt = int64ToAuxInt(4)
		v7.AddArg(sptr)
		v6.AddArg2(v7, mem)
		v5.AddArg(v6)
		v8 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
		v8.AuxInt = int64ToAuxInt(32)
		v4.AddArg2(v5, v8)
		v1.AddArg2(v2, v4)
		v9 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
		v9.AuxInt = int64ToAuxInt(int64(uint64(read32(scon, 0, config.ctxt.Arch.ByteOrder)) | (uint64(read8(scon, 4)) << 32)))
		v0.AddArg2(v1, v9)
		v.AddArg2(v0, mem)
		return true
	}
	// match: (StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [5]) mem)
	// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8
	// result: (MakeResult (Eq64 (Or64 <typ.Int64> (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem)) (Lsh64x64 <typ.Int64> (ZeroExt8to64 <typ.Int64> (Load <typ.Int8> (OffPtr <typ.BytePtr> [4] sptr) mem)) (Const64 <typ.Int64> [32]))) (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read8(scon,4))<<32))])) mem)
	for {
		if len(v.Args) != 4 {
			break
		}
		callAux := auxToCall(v.Aux)
		mem := v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAddr {
			break
		}
		scon := auxToSym(v_0.Aux)
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpSB {
			break
		}
		sptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 5 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8) {
			break
		}
		v.reset(OpMakeResult)
		v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool)
		v1 := b.NewValue0(v.Pos, OpOr64, typ.Int64)
		v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.Int64)
		v3 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
		v3.AddArg2(sptr, mem)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpLsh64x64, typ.Int64)
		v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.Int64)
		v6 := b.NewValue0(v.Pos, OpLoad, typ.Int8)
		v7 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtr)
		v7.AuxInt = int64ToAuxInt(4)
		v7.AddArg(sptr)
		v6.AddArg2(v7, mem)
		v5.AddArg(v6)
		v8 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
		v8.AuxInt = int64ToAuxInt(32)
		v4.AddArg2(v5, v8)
		v1.AddArg2(v2, v4)
		v9 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
		v9.AuxInt = int64ToAuxInt(int64(uint64(read32(scon, 0, config.ctxt.Arch.ByteOrder)) | (uint64(read8(scon, 4)) << 32)))
		v0.AddArg2(v1, v9)
		v.AddArg2(v0, mem)
		return true
	}
	// match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [6]) mem)
	// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8
	// result: (MakeResult (Eq64 (Or64 <typ.Int64> (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem)) (Lsh64x64 <typ.Int64> (ZeroExt16to64 <typ.Int64> (Load <typ.Int16> (OffPtr <typ.BytePtr> [4] sptr) mem)) (Const64 <typ.Int64> [32]))) (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read16(scon,4,config.ctxt.Arch.ByteOrder))<<32))])) mem)
	for {
		if len(v.Args) != 4 {
			break
		}
		callAux := auxToCall(v.Aux)
		mem := v.Args[3]
		sptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAddr {
			break
		}
		scon := auxToSym(v_1.Aux)
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpSB {
			break
		}
		v_2 := v.Args[2]
		if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 6 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8) {
			break
		}
		v.reset(OpMakeResult)
		v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool)
		v1 := b.NewValue0(v.Pos, OpOr64, typ.Int64)
		v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.Int64)
		v3 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
		v3.AddArg2(sptr, mem)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpLsh64x64, typ.Int64)
		v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.Int64)
		v6 := b.NewValue0(v.Pos, OpLoad, typ.Int16)
		v7 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtr)
		v7.AuxInt = int64ToAuxInt(4)
		v7.AddArg(sptr)
		v6.AddArg2(v7, mem)
		v5.AddArg(v6)
		v8 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
		v8.AuxInt = int64ToAuxInt(32)
		v4.AddArg2(v5, v8)
		v1.AddArg2(v2, v4)
		v9 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
		v9.AuxInt = int64ToAuxInt(int64(uint64(read32(scon, 0, config.ctxt.Arch.ByteOrder)) | (uint64(read16(scon, 4, config.ctxt.Arch.ByteOrder)) << 32)))
		v0.AddArg2(v1, v9)
		v.AddArg2(v0, mem)
		return true
	}
	// match: (StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [6]) mem)
	// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8
	// result: (MakeResult (Eq64 (Or64 <typ.Int64> (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem)) (Lsh64x64 <typ.Int64> (ZeroExt16to64 <typ.Int64> (Load <typ.Int16> (OffPtr <typ.BytePtr> [4] sptr) mem)) (Const64 <typ.Int64> [32]))) (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read16(scon,4,config.ctxt.Arch.ByteOrder))<<32))])) mem)
	for {
		if len(v.Args) != 4 {
			break
		}
		callAux := auxToCall(v.Aux)
		mem := v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAddr {
			break
		}
		scon := auxToSym(v_0.Aux)
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpSB {
			break
		}
		sptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 6 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8) {
			break
		}
		v.reset(OpMakeResult)
		v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool)
		v1 := b.NewValue0(v.Pos, OpOr64, typ.Int64)
		v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.Int64)
		v3 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
		v3.AddArg2(sptr, mem)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpLsh64x64, typ.Int64)
		v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.Int64)
		v6 := b.NewValue0(v.Pos, OpLoad, typ.Int16)
		v7 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtr)
		v7.AuxInt = int64ToAuxInt(4)
		v7.AddArg(sptr)
		v6.AddArg2(v7, mem)
		v5.AddArg(v6)
		v8 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
		v8.AuxInt = int64ToAuxInt(32)
		v4.AddArg2(v5, v8)
		v1.AddArg2(v2, v4)
		v9 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
		v9.AuxInt = int64ToAuxInt(int64(uint64(read32(scon, 0, config.ctxt.Arch.ByteOrder)) | (uint64(read16(scon, 4, config.ctxt.Arch.ByteOrder)) << 32)))
		v0.AddArg2(v1, v9)
		v.AddArg2(v0, mem)
		return true
	}
	// match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [7]) mem)
	// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8
	// result: (MakeResult (Eq64 (Or64 <typ.Int64> (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem)) (Lsh64x64 <typ.Int64> (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> (OffPtr <typ.BytePtr> [3] sptr) mem)) (Const64 <typ.Int64> [32]))) (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read32(scon,3,config.ctxt.Arch.ByteOrder))<<32))])) mem)
	for {
		if len(v.Args) != 4 {
			break
		}
		callAux := auxToCall(v.Aux)
		mem := v.Args[3]
		sptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAddr {
			break
		}
		scon := auxToSym(v_1.Aux)
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpSB {
			break
		}
		v_2 := v.Args[2]
		if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 7 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8) {
			break
		}
		v.reset(OpMakeResult)
		v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool)
		v1 := b.NewValue0(v.Pos, OpOr64, typ.Int64)
		v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.Int64)
		v3 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
		v3.AddArg2(sptr, mem)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpLsh64x64, typ.Int64)
		v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.Int64)
		v6 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
		v7 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtr)
		v7.AuxInt = int64ToAuxInt(3)
		v7.AddArg(sptr)
		v6.AddArg2(v7, mem)
		v5.AddArg(v6)
		v8 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
		v8.AuxInt = int64ToAuxInt(32)
		v4.AddArg2(v5, v8)
		v1.AddArg2(v2, v4)
		v9 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
		v9.AuxInt = int64ToAuxInt(int64(uint64(read32(scon, 0, config.ctxt.Arch.ByteOrder)) | (uint64(read32(scon, 3, config.ctxt.Arch.ByteOrder)) << 32)))
		v0.AddArg2(v1, v9)
		v.AddArg2(v0, mem)
		return true
	}
	// match: (StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [7]) mem)
	// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8
	// result: (MakeResult (Eq64 (Or64 <typ.Int64> (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem)) (Lsh64x64 <typ.Int64> (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> (OffPtr <typ.BytePtr> [3] sptr) mem)) (Const64 <typ.Int64> [32]))) (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read32(scon,3,config.ctxt.Arch.ByteOrder))<<32))])) mem)
	for {
		if len(v.Args) != 4 {
			break
		}
		callAux := auxToCall(v.Aux)
		mem := v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAddr {
			break
		}
		scon := auxToSym(v_0.Aux)
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpSB {
			break
		}
		sptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 7 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8) {
			break
		}
		v.reset(OpMakeResult)
		v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool)
		v1 := b.NewValue0(v.Pos, OpOr64, typ.Int64)
		v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.Int64)
		v3 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
		v3.AddArg2(sptr, mem)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpLsh64x64, typ.Int64)
		v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.Int64)
		v6 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
		v7 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtr)
		v7.AuxInt = int64ToAuxInt(3)
		v7.AddArg(sptr)
		v6.AddArg2(v7, mem)
		v5.AddArg(v6)
		v8 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
		v8.AuxInt = int64ToAuxInt(32)
		v4.AddArg2(v5, v8)
		v1.AddArg2(v2, v4)
		v9 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
		v9.AuxInt = int64ToAuxInt(int64(uint64(read32(scon, 0, config.ctxt.Arch.ByteOrder)) | (uint64(read32(scon, 3, config.ctxt.Arch.ByteOrder)) << 32)))
		v0.AddArg2(v1, v9)
		v.AddArg2(v0, mem)
		return true
	}
	// match: (StaticLECall {callAux} _ _ (Const64 [0]) mem)
	// cond: isSameCall(callAux, "runtime.memequal")
	// result: (MakeResult (ConstBool <typ.Bool> [true]) mem)
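Each pair of rules above open-codes runtime.memequal against a read-only constant for one odd length (3, 5, 6, 7), using two fixed-width loads instead of a call. A source-level sketch of the length-3 shape, assuming a little-endian target where unaligned loads are legal (the rules themselves use config.ctxt.Arch.ByteOrder, so the idea is endian-neutral):

	package memeqdemo

	import "encoding/binary"

	// equal3 compares three bytes with one 16-bit and one 8-bit load,
	// merged into a single 32-bit comparison - the same shape as the
	// (Const64 [3]) rules above.
	func equal3(p *[3]byte, want [3]byte) bool {
		got := uint32(binary.LittleEndian.Uint16(p[0:2])) | uint32(p[2])<<16
		w := uint32(binary.LittleEndian.Uint16(want[0:2])) | uint32(want[2])<<16
		return got == w
	}

Note the length-7 rules take a different tack: the second 32-bit load starts at offset 3 and overlaps the first, rather than reading past the end of the buffer.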
@@ -144,6 +144,13 @@ func (v *Value) AuxArm64BitField() arm64BitField {
	return arm64BitField(v.AuxInt)
}

func (v *Value) AuxArm64ConditionalParams() arm64ConditionalParams {
	if opcodeTable[v.Op].auxType != auxARM64ConditionalParams {
		v.Fatalf("op %s doesn't have a ARM64ConditionalParams aux field", v.Op)
	}
	return auxIntToArm64ConditionalParams(v.AuxInt)
}

// long form print. v# = opcode <type> [aux] args [: reg] (names)
func (v *Value) LongString() string {
	if v == nil {
@@ -203,6 +210,15 @@ func (v *Value) auxString() string {
		lsb := v.AuxArm64BitField().lsb()
		width := v.AuxArm64BitField().width()
		return fmt.Sprintf(" [lsb=%d,width=%d]", lsb, width)
	case auxARM64ConditionalParams:
		params := v.AuxArm64ConditionalParams()
		cond := params.Cond()
		nzcv := params.Nzcv()
		imm, ok := params.ConstValue()
		if ok {
			return fmt.Sprintf(" [cond=%s,nzcv=%d,imm=%d]", cond, nzcv, imm)
		}
		return fmt.Sprintf(" [cond=%s,nzcv=%d]", cond, nzcv)
	case auxFloat32, auxFloat64:
		return fmt.Sprintf(" [%g]", v.AuxFloat())
	case auxString:
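For reference, the two output shapes the new case can produce, shown with invented field values (the format strings are the ones added above; assumes fmt is imported):

	func auxStringDemo() (string, string) {
		withImm := fmt.Sprintf(" [cond=%s,nzcv=%d,imm=%d]", "EQ", 4, 10) // " [cond=EQ,nzcv=4,imm=10]"
		noImm := fmt.Sprintf(" [cond=%s,nzcv=%d]", "EQ", 4)              // " [cond=EQ,nzcv=4]"
		return withImm, noImm
	}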
@@ -233,6 +233,13 @@ func TestIntendedInlining(t *testing.T) {
		"testing": {
			"(*B).Loop",
		},
		"path": {
			"Base",
			"scanChunk",
		},
		"path/filepath": {
			"scanChunk",
		},
	}

	if runtime.GOARCH != "386" && runtime.GOARCH != "loong64" && runtime.GOARCH != "mips64" && runtime.GOARCH != "mips64le" && runtime.GOARCH != "riscv64" {
@@ -162,12 +162,25 @@ func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
}

// resolve resolves the type parameters, methods, and underlying type of n.
-// This information may be loaded from a provided loader function, or computed
-// from an origin type (in the case of instances).
-//
-// After resolution, the type parameters, methods, and underlying type of n are
-// accessible; but if n is an instantiated type, its methods may still be
-// unexpanded.
+// For the purposes of resolution, there are three categories of named types:
+//  1. Instantiated Types
+//  2. Lazy Loaded Types
+//  3. All Others
+//
+// Note that the above form a partition.
+//
+// Instantiated types:
+// Type parameters, methods, and underlying type of n become accessible,
+// though methods are lazily populated as needed.
+//
+// Lazy loaded types:
+// Type parameters, methods, and underlying type of n become accessible
+// and are fully expanded.
+//
+// All others:
+// Effectively, nothing happens. The underlying type of n may still be
+// a named type.
func (n *Named) resolve() *Named {
	if n.state() > unresolved { // avoid locking below
		return n
@@ -804,7 +804,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
		}
	case ssa.Op386LoweredPanicBoundsCR:
		yIsReg = true
-		yVal := int(v.Args[0].Reg() - x86.REG_AX)
+		yVal = int(v.Args[0].Reg() - x86.REG_AX)
		c := v.Aux.(ssa.PanicBoundsC).C
		if c >= 0 && c <= abi.BoundsMaxConst {
			xVal = int(c)

src/cmd/dist/buildtool.go (vendored)

@@ -49,6 +49,7 @@ var bootstrapDirs = []string{
	"cmd/internal/macho",
	"cmd/internal/obj/...",
	"cmd/internal/objabi",
	"cmd/internal/par",
	"cmd/internal/pgo",
	"cmd/internal/pkgpath",
	"cmd/internal/quoted",

src/cmd/dist/test.go (vendored)

@@ -1213,7 +1213,7 @@ func (t *tester) internalLinkPIE() bool {
	case "darwin-amd64", "darwin-arm64",
		"linux-amd64", "linux-arm64", "linux-loong64", "linux-ppc64le",
		"android-arm64",
-		"windows-amd64", "windows-386", "windows-arm":
+		"windows-amd64", "windows-386":
		return true
	}
	return false
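The one-character change in the 386 hunk above (:= to =) removes an accidental redeclaration: the short form declared a new yVal that shadowed the outer one, so the computed register index never reached the bounds-report logic. The bug class, in miniature (a standalone illustration, not the compiler's code):

	func shadowExample() int {
		x := 0
		{
			x := 1 // ':=' declares a new x; the outer one is untouched
			_ = x
		}
		return x // 0 - the inner assignment was lost
	}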
@@ -6,9 +6,6 @@ package main

import (
	"go/ast"
-	"go/token"
-	"reflect"
-	"strings"
)

func init() {

@@ -18,130 +15,11 @@ func init() {
var cftypeFix = fix{
	name: "cftype",
	date: "2017-09-27",
-	f:    cftypefix,
-	desc: `Fixes initializers and casts of C.*Ref and JNI types`,
+	f:    noop,
+	desc: `Fixes initializers and casts of C.*Ref and JNI types (removed)`,
	disabled: false,
}

// Old state:
//
//	type CFTypeRef unsafe.Pointer
//
// New state:
//
//	type CFTypeRef uintptr
//
// and similar for other *Ref types.
// This fix finds nils initializing these types and replaces the nils with 0s.
func cftypefix(f *ast.File) bool {
	return typefix(f, func(s string) bool {
		return strings.HasPrefix(s, "C.") && strings.HasSuffix(s, "Ref") && s != "C.CFAllocatorRef"
	})
}

// typefix replaces nil with 0 for all nils whose type, when passed to badType, returns true.
func typefix(f *ast.File, badType func(string) bool) bool {
	if !imports(f, "C") {
		return false
	}
	typeof, _ := typecheck(&TypeConfig{}, f)
	changed := false

	// step 1: Find all the nils with the offending types.
	// Compute their replacement.
	badNils := map[any]ast.Expr{}
	walk(f, func(n any) {
		if i, ok := n.(*ast.Ident); ok && i.Name == "nil" && badType(typeof[n]) {
			badNils[n] = &ast.BasicLit{ValuePos: i.NamePos, Kind: token.INT, Value: "0"}
		}
	})

	// step 2: find all uses of the bad nils, replace them with 0.
	// There's no easy way to map from an ast.Expr to all the places that use them, so
	// we use reflect to find all such references.
	if len(badNils) > 0 {
		exprType := reflect.TypeFor[ast.Expr]()
		exprSliceType := reflect.TypeFor[[]ast.Expr]()
		walk(f, func(n any) {
			if n == nil {
				return
			}
			v := reflect.ValueOf(n)
			if v.Kind() != reflect.Pointer {
				return
			}
			if v.IsNil() {
				return
			}
			v = v.Elem()
			if v.Kind() != reflect.Struct {
				return
			}
			for i := 0; i < v.NumField(); i++ {
				f := v.Field(i)
				if f.Type() == exprType {
					if r := badNils[f.Interface()]; r != nil {
						f.Set(reflect.ValueOf(r))
						changed = true
					}
				}
				if f.Type() == exprSliceType {
					for j := 0; j < f.Len(); j++ {
						e := f.Index(j)
						if r := badNils[e.Interface()]; r != nil {
							e.Set(reflect.ValueOf(r))
							changed = true
						}
					}
				}
			}
		})
	}

	// step 3: fix up invalid casts.
	// It used to be ok to cast between *unsafe.Pointer and *C.CFTypeRef in a single step.
	// Now we need unsafe.Pointer as an intermediate cast.
	// (*unsafe.Pointer)(x) where x is type *bad -> (*unsafe.Pointer)(unsafe.Pointer(x))
	// (*bad.type)(x) where x is type *unsafe.Pointer -> (*bad.type)(unsafe.Pointer(x))
	walk(f, func(n any) {
		if n == nil {
			return
		}
		// Find pattern like (*a.b)(x)
		c, ok := n.(*ast.CallExpr)
		if !ok {
			return
		}
		if len(c.Args) != 1 {
			return
		}
		p, ok := c.Fun.(*ast.ParenExpr)
		if !ok {
			return
		}
		s, ok := p.X.(*ast.StarExpr)
		if !ok {
			return
		}
		t, ok := s.X.(*ast.SelectorExpr)
		if !ok {
			return
		}
		pkg, ok := t.X.(*ast.Ident)
		if !ok {
			return
		}
		dst := pkg.Name + "." + t.Sel.Name
		src := typeof[c.Args[0]]
		if badType(dst) && src == "*unsafe.Pointer" ||
			dst == "unsafe.Pointer" && strings.HasPrefix(src, "*") && badType(src[1:]) {
			c.Args[0] = &ast.CallExpr{
				Fun:  &ast.SelectorExpr{X: &ast.Ident{Name: "unsafe"}, Sel: &ast.Ident{Name: "Pointer"}},
				Args: []ast.Expr{c.Args[0]},
			}
			changed = true
		}
	})

-	return changed
+func noop(f *ast.File) bool {
+	return false
}
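The same pattern repeats for the context and egl fixes below: the fix stays registered so its name remains known to the tool, but its function becomes a no-op. A minimal sketch of a fix retired this way (the field values are invented; fix and noop are the file's own identifiers):

	var retiredFix = fix{
		name:     "example",
		date:     "2020-01-01",
		f:        noop, // keeps the name registered, does nothing
		desc:     `Example fix (removed)`,
		disabled: false,
	}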
@@ -1,241 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

func init() {
	addTestCases(cftypeTests, cftypefix)
}

var cftypeTests = []testCase{
	{
		Name: "cftype.localVariable",
		In: `package main

// typedef const void *CFTypeRef;
import "C"

func f() {
	var x C.CFTypeRef = nil
	x = nil
	x, x = nil, nil
}
`,
		Out: `package main

// typedef const void *CFTypeRef;
import "C"

func f() {
	var x C.CFTypeRef = 0
	x = 0
	x, x = 0, 0
}
`,
	},
	{
		Name: "cftype.globalVariable",
		In: `package main

// typedef const void *CFTypeRef;
import "C"

var x C.CFTypeRef = nil

func f() {
	x = nil
}
`,
		Out: `package main

// typedef const void *CFTypeRef;
import "C"

var x C.CFTypeRef = 0

func f() {
	x = 0
}
`,
	},
	{
		Name: "cftype.EqualArgument",
		In: `package main

// typedef const void *CFTypeRef;
import "C"

var x C.CFTypeRef
var y = x == nil
var z = x != nil
`,
		Out: `package main

// typedef const void *CFTypeRef;
import "C"

var x C.CFTypeRef
var y = x == 0
var z = x != 0
`,
	},
	{
		Name: "cftype.StructField",
		In: `package main

// typedef const void *CFTypeRef;
import "C"

type T struct {
	x C.CFTypeRef
}

var t = T{x: nil}
`,
		Out: `package main

// typedef const void *CFTypeRef;
import "C"

type T struct {
	x C.CFTypeRef
}

var t = T{x: 0}
`,
	},
	{
		Name: "cftype.FunctionArgument",
		In: `package main

// typedef const void *CFTypeRef;
import "C"

func f(x C.CFTypeRef) {
}

func g() {
	f(nil)
}
`,
		Out: `package main

// typedef const void *CFTypeRef;
import "C"

func f(x C.CFTypeRef) {
}

func g() {
	f(0)
}
`,
	},
	{
		Name: "cftype.ArrayElement",
		In: `package main

// typedef const void *CFTypeRef;
import "C"

var x = [3]C.CFTypeRef{nil, nil, nil}
`,
		Out: `package main

// typedef const void *CFTypeRef;
import "C"

var x = [3]C.CFTypeRef{0, 0, 0}
`,
	},
	{
		Name: "cftype.SliceElement",
		In: `package main

// typedef const void *CFTypeRef;
import "C"

var x = []C.CFTypeRef{nil, nil, nil}
`,
		Out: `package main

// typedef const void *CFTypeRef;
import "C"

var x = []C.CFTypeRef{0, 0, 0}
`,
	},
	{
		Name: "cftype.MapKey",
		In: `package main

// typedef const void *CFTypeRef;
import "C"

var x = map[C.CFTypeRef]int{nil: 0}
`,
		Out: `package main

// typedef const void *CFTypeRef;
import "C"

var x = map[C.CFTypeRef]int{0: 0}
`,
	},
	{
		Name: "cftype.MapValue",
		In: `package main

// typedef const void *CFTypeRef;
import "C"

var x = map[int]C.CFTypeRef{0: nil}
`,
		Out: `package main

// typedef const void *CFTypeRef;
import "C"

var x = map[int]C.CFTypeRef{0: 0}
`,
	},
	{
		Name: "cftype.Conversion1",
		In: `package main

// typedef const void *CFTypeRef;
import "C"

var x C.CFTypeRef
var y = (*unsafe.Pointer)(&x)
`,
		Out: `package main

// typedef const void *CFTypeRef;
import "C"

var x C.CFTypeRef
var y = (*unsafe.Pointer)(unsafe.Pointer(&x))
`,
	},
	{
		Name: "cftype.Conversion2",
		In: `package main

// typedef const void *CFTypeRef;
import "C"

var x unsafe.Pointer
var y = (*C.CFTypeRef)(&x)
`,
		Out: `package main

// typedef const void *CFTypeRef;
import "C"

var x unsafe.Pointer
var y = (*C.CFTypeRef)(unsafe.Pointer(&x))
`,
	},
}
@@ -4,10 +4,6 @@

package main

-import (
-	"go/ast"
-)
-
func init() {
	register(contextFix)
}

@@ -15,11 +11,7 @@ func init() {
var contextFix = fix{
	name: "context",
	date: "2016-09-09",
-	f:    ctxfix,
-	desc: `Change imports of golang.org/x/net/context to context`,
+	f:    noop,
+	desc: `Change imports of golang.org/x/net/context to context (removed)`,
	disabled: false,
}

-func ctxfix(f *ast.File) bool {
-	return rewriteImport(f, "golang.org/x/net/context", "context")
-}
@@ -1,42 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

func init() {
	addTestCases(contextTests, ctxfix)
}

var contextTests = []testCase{
	{
		Name: "context.0",
		In: `package main

import "golang.org/x/net/context"

var _ = "golang.org/x/net/context"
`,
		Out: `package main

import "context"

var _ = "golang.org/x/net/context"
`,
	},
	{
		Name: "context.1",
		In: `package main

import ctx "golang.org/x/net/context"

var _ = ctx.Background()
`,
		Out: `package main

import ctx "context"

var _ = ctx.Background()
`,
	},
}
@@ -4,10 +4,6 @@
 
 package main
 
-import (
-	"go/ast"
-)
-
 func init() {
 	register(eglFixDisplay)
 	register(eglFixConfig)

@@ -16,45 +12,15 @@ func init() {
 var eglFixDisplay = fix{
 	name:     "egl",
 	date:     "2018-12-15",
-	f:        eglfixDisp,
-	desc:     `Fixes initializers of EGLDisplay`,
+	f:        noop,
+	desc:     `Fixes initializers of EGLDisplay (removed)`,
 	disabled: false,
 }
-
-// Old state:
-//
-//	type EGLDisplay unsafe.Pointer
-//
-// New state:
-//
-//	type EGLDisplay uintptr
-//
-// This fix finds nils initializing these types and replaces the nils with 0s.
-func eglfixDisp(f *ast.File) bool {
-	return typefix(f, func(s string) bool {
-		return s == "C.EGLDisplay"
-	})
-}
 
 var eglFixConfig = fix{
 	name:     "eglconf",
 	date:     "2020-05-30",
-	f:        eglfixConfig,
-	desc:     `Fixes initializers of EGLConfig`,
+	f:        noop,
+	desc:     `Fixes initializers of EGLConfig (removed)`,
 	disabled: false,
 }
-
-// Old state:
-//
-//	type EGLConfig unsafe.Pointer
-//
-// New state:
-//
-//	type EGLConfig uintptr
-//
-// This fix finds nils initializing these types and replaces the nils with 0s.
-func eglfixConfig(f *ast.File) bool {
-	return typefix(f, func(s string) bool {
-		return s == "C.EGLConfig"
-	})
-}

@@ -1,214 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import "strings"
-
-func init() {
-	addTestCases(eglTestsFor("EGLDisplay"), eglfixDisp)
-	addTestCases(eglTestsFor("EGLConfig"), eglfixConfig)
-}
-
-func eglTestsFor(tname string) []testCase {
-	var eglTests = []testCase{
-		{
-			Name: "egl.localVariable",
-			In: `package main
-
-// typedef void *$EGLTYPE;
-import "C"
-
-func f() {
-	var x C.$EGLTYPE = nil
-	x = nil
-	x, x = nil, nil
-}
-`,
-			Out: `package main
-
-// typedef void *$EGLTYPE;
-import "C"
-
-func f() {
-	var x C.$EGLTYPE = 0
-	x = 0
-	x, x = 0, 0
-}
-`,
-		},
-		{
-			Name: "egl.globalVariable",
-			In: `package main
-
-// typedef void *$EGLTYPE;
-import "C"
-
-var x C.$EGLTYPE = nil
-
-func f() {
-	x = nil
-}
-`,
-			Out: `package main
-
-// typedef void *$EGLTYPE;
-import "C"
-
-var x C.$EGLTYPE = 0
-
-func f() {
-	x = 0
-}
-`,
-		},
-		{
-			Name: "egl.EqualArgument",
-			In: `package main
-
-// typedef void *$EGLTYPE;
-import "C"
-
-var x C.$EGLTYPE
-var y = x == nil
-var z = x != nil
-`,
-			Out: `package main
-
-// typedef void *$EGLTYPE;
-import "C"
-
-var x C.$EGLTYPE
-var y = x == 0
-var z = x != 0
-`,
-		},
-		{
-			Name: "egl.StructField",
-			In: `package main
-
-// typedef void *$EGLTYPE;
-import "C"
-
-type T struct {
-	x C.$EGLTYPE
-}
-
-var t = T{x: nil}
-`,
-			Out: `package main
-
-// typedef void *$EGLTYPE;
-import "C"
-
-type T struct {
-	x C.$EGLTYPE
-}
-
-var t = T{x: 0}
-`,
-		},
-		{
-			Name: "egl.FunctionArgument",
-			In: `package main
-
-// typedef void *$EGLTYPE;
-import "C"
-
-func f(x C.$EGLTYPE) {
-}
-
-func g() {
-	f(nil)
-}
-`,
-			Out: `package main
-
-// typedef void *$EGLTYPE;
-import "C"
-
-func f(x C.$EGLTYPE) {
-}
-
-func g() {
-	f(0)
-}
-`,
-		},
-		{
-			Name: "egl.ArrayElement",
-			In: `package main
-
-// typedef void *$EGLTYPE;
-import "C"
-
-var x = [3]C.$EGLTYPE{nil, nil, nil}
-`,
-			Out: `package main
-
-// typedef void *$EGLTYPE;
-import "C"
-
-var x = [3]C.$EGLTYPE{0, 0, 0}
-`,
-		},
-		{
-			Name: "egl.SliceElement",
-			In: `package main
-
-// typedef void *$EGLTYPE;
-import "C"
-
-var x = []C.$EGLTYPE{nil, nil, nil}
-`,
-			Out: `package main
-
-// typedef void *$EGLTYPE;
-import "C"
-
-var x = []C.$EGLTYPE{0, 0, 0}
-`,
-		},
-		{
-			Name: "egl.MapKey",
-			In: `package main
-
-// typedef void *$EGLTYPE;
-import "C"
-
-var x = map[C.$EGLTYPE]int{nil: 0}
-`,
-			Out: `package main
-
-// typedef void *$EGLTYPE;
-import "C"
-
-var x = map[C.$EGLTYPE]int{0: 0}
-`,
-		},
-		{
-			Name: "egl.MapValue",
-			In: `package main
-
-// typedef void *$EGLTYPE;
-import "C"
-
-var x = map[int]C.$EGLTYPE{0: nil}
-`,
-			Out: `package main
-
-// typedef void *$EGLTYPE;
-import "C"
-
-var x = map[int]C.$EGLTYPE{0: 0}
-`,
-		},
-	}
-	for i := range eglTests {
-		t := &eglTests[i]
-		t.In = strings.ReplaceAll(t.In, "$EGLTYPE", tname)
-		t.Out = strings.ReplaceAll(t.Out, "$EGLTYPE", tname)
-	}
-	return eglTests
-}

@@ -4,11 +4,6 @@
 
 package main
 
-import (
-	"go/ast"
-	"strconv"
-)
-
 func init() {
 	register(gotypesFix)
 }

@@ -16,60 +11,6 @@ func init() {
 var gotypesFix = fix{
 	name: "gotypes",
 	date: "2015-07-16",
-	f:    gotypes,
-	desc: `Change imports of golang.org/x/tools/go/{exact,types} to go/{constant,types}`,
-}
-
-func gotypes(f *ast.File) bool {
-	fixed := fixGoTypes(f)
-	if fixGoExact(f) {
-		fixed = true
-	}
-	return fixed
-}
-
-func fixGoTypes(f *ast.File) bool {
-	return rewriteImport(f, "golang.org/x/tools/go/types", "go/types")
-}
-
-func fixGoExact(f *ast.File) bool {
-	// This one is harder because the import name changes.
-	// First find the import spec.
-	var importSpec *ast.ImportSpec
-	walk(f, func(n any) {
-		if importSpec != nil {
-			return
-		}
-		spec, ok := n.(*ast.ImportSpec)
-		if !ok {
-			return
-		}
-		path, err := strconv.Unquote(spec.Path.Value)
-		if err != nil {
-			return
-		}
-		if path == "golang.org/x/tools/go/exact" {
-			importSpec = spec
-		}
-
-	})
-	if importSpec == nil {
-		return false
-	}
-
-	// We are about to rename exact.* to constant.*, but constant is a common
-	// name. See if it will conflict. This is a hack but it is effective.
-	exists := renameTop(f, "constant", "constant")
-	suffix := ""
-	if exists {
-		suffix = "_"
-	}
-	// Now we need to rename all the uses of the import. RewriteImport
-	// affects renameTop, but not vice versa, so do them in this order.
-	renameTop(f, "exact", "constant"+suffix)
-	rewriteImport(f, "golang.org/x/tools/go/exact", "go/constant")
-	// renameTop will also rewrite the imported package name. Fix that;
-	// we know it should be missing.
-	importSpec.Name = nil
-	return true
+	f:    noop,
+	desc: `Change imports of golang.org/x/tools/go/{exact,types} to go/{constant,types} (removed)`,
 }

@@ -1,89 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-func init() {
-	addTestCases(gotypesTests, gotypes)
-}
-
-var gotypesTests = []testCase{
-	{
-		Name: "gotypes.0",
-		In: `package main
-
-import "golang.org/x/tools/go/types"
-import "golang.org/x/tools/go/exact"
-
-var _ = exact.Kind
-
-func f() {
-	_ = exact.MakeBool(true)
-}
-`,
-		Out: `package main
-
-import "go/types"
-import "go/constant"
-
-var _ = constant.Kind
-
-func f() {
-	_ = constant.MakeBool(true)
-}
-`,
-	},
-	{
-		Name: "gotypes.1",
-		In: `package main
-
-import "golang.org/x/tools/go/types"
-import foo "golang.org/x/tools/go/exact"
-
-var _ = foo.Kind
-
-func f() {
-	_ = foo.MakeBool(true)
-}
-`,
-		Out: `package main
-
-import "go/types"
-import "go/constant"
-
-var _ = foo.Kind
-
-func f() {
-	_ = foo.MakeBool(true)
-}
-`,
-	},
-	{
-		Name: "gotypes.0",
-		In: `package main
-
-import "golang.org/x/tools/go/types"
-import "golang.org/x/tools/go/exact"
-
-var _ = exact.Kind
-var constant = 23 // Use of new package name.
-
-func f() {
-	_ = exact.MakeBool(true)
-}
-`,
-		Out: `package main
-
-import "go/types"
-import "go/constant"
-
-var _ = constant_.Kind
-var constant = 23 // Use of new package name.
-
-func f() {
-	_ = constant_.MakeBool(true)
-}
-`,
-	},
-}

@@ -4,10 +4,6 @@
 
 package main
 
-import (
-	"go/ast"
-)
-
 func init() {
 	register(jniFix)
 }

@@ -15,55 +11,7 @@ func init() {
 var jniFix = fix{
 	name:     "jni",
 	date:     "2017-12-04",
-	f:        jnifix,
-	desc:     `Fixes initializers of JNI's jobject and subtypes`,
+	f:        noop,
+	desc:     `Fixes initializers of JNI's jobject and subtypes (removed)`,
 	disabled: false,
 }
-
-// Old state:
-//
-//	type jobject *_jobject
-//
-// New state:
-//
-//	type jobject uintptr
-//
-// and similar for subtypes of jobject.
-// This fix finds nils initializing these types and replaces the nils with 0s.
-func jnifix(f *ast.File) bool {
-	return typefix(f, func(s string) bool {
-		switch s {
-		case "C.jobject":
-			return true
-		case "C.jclass":
-			return true
-		case "C.jthrowable":
-			return true
-		case "C.jstring":
-			return true
-		case "C.jarray":
-			return true
-		case "C.jbooleanArray":
-			return true
-		case "C.jbyteArray":
-			return true
-		case "C.jcharArray":
-			return true
-		case "C.jshortArray":
-			return true
-		case "C.jintArray":
-			return true
-		case "C.jlongArray":
-			return true
-		case "C.jfloatArray":
-			return true
-		case "C.jdoubleArray":
-			return true
-		case "C.jobjectArray":
-			return true
-		case "C.jweak":
-			return true
-		}
-		return false
-	})
-}

@@ -1,203 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-func init() {
-	addTestCases(jniTests, jnifix)
-}
-
-var jniTests = []testCase{
-	{
-		Name: "jni.localVariable",
-		In: `package main
-
-// typedef struct _jobject* jobject;
-import "C"
-
-func f() {
-	var x C.jobject = nil
-	x = nil
-	x, x = nil, nil
-}
-`,
-		Out: `package main
-
-// typedef struct _jobject* jobject;
-import "C"
-
-func f() {
-	var x C.jobject = 0
-	x = 0
-	x, x = 0, 0
-}
-`,
-	},
-	{
-		Name: "jni.globalVariable",
-		In: `package main
-
-// typedef struct _jobject* jobject;
-import "C"
-
-var x C.jobject = nil
-
-func f() {
-	x = nil
-}
-`,
-		Out: `package main
-
-// typedef struct _jobject* jobject;
-import "C"
-
-var x C.jobject = 0
-
-func f() {
-	x = 0
-}
-`,
-	},
-	{
-		Name: "jni.EqualArgument",
-		In: `package main
-
-// typedef struct _jobject* jobject;
-import "C"
-
-var x C.jobject
-var y = x == nil
-var z = x != nil
-`,
-		Out: `package main
-
-// typedef struct _jobject* jobject;
-import "C"
-
-var x C.jobject
-var y = x == 0
-var z = x != 0
-`,
-	},
-	{
-		Name: "jni.StructField",
-		In: `package main
-
-// typedef struct _jobject* jobject;
-import "C"
-
-type T struct {
-	x C.jobject
-}
-
-var t = T{x: nil}
-`,
-		Out: `package main
-
-// typedef struct _jobject* jobject;
-import "C"
-
-type T struct {
-	x C.jobject
-}
-
-var t = T{x: 0}
-`,
-	},
-	{
-		Name: "jni.FunctionArgument",
-		In: `package main
-
-// typedef struct _jobject* jobject;
-import "C"
-
-func f(x C.jobject) {
-}
-
-func g() {
-	f(nil)
-}
-`,
-		Out: `package main
-
-// typedef struct _jobject* jobject;
-import "C"
-
-func f(x C.jobject) {
-}
-
-func g() {
-	f(0)
-}
-`,
-	},
-	{
-		Name: "jni.ArrayElement",
-		In: `package main
-
-// typedef struct _jobject* jobject;
-import "C"
-
-var x = [3]C.jobject{nil, nil, nil}
-`,
-		Out: `package main
-
-// typedef struct _jobject* jobject;
-import "C"
-
-var x = [3]C.jobject{0, 0, 0}
-`,
-	},
-	{
-		Name: "jni.SliceElement",
-		In: `package main
-
-// typedef struct _jobject* jobject;
-import "C"
-
-var x = []C.jobject{nil, nil, nil}
-`,
-		Out: `package main
-
-// typedef struct _jobject* jobject;
-import "C"
-
-var x = []C.jobject{0, 0, 0}
-`,
-	},
-	{
-		Name: "jni.MapKey",
-		In: `package main
-
-// typedef struct _jobject* jobject;
-import "C"
-
-var x = map[C.jobject]int{nil: 0}
-`,
-		Out: `package main
-
-// typedef struct _jobject* jobject;
-import "C"
-
-var x = map[C.jobject]int{0: 0}
-`,
-	},
-	{
-		Name: "jni.MapValue",
-		In: `package main
-
-// typedef struct _jobject* jobject;
-import "C"
-
-var x = map[int]C.jobject{0: nil}
-`,
-		Out: `package main
-
-// typedef struct _jobject* jobject;
-import "C"
-
-var x = map[int]C.jobject{0: 0}
-`,
-	},
-}

@@ -4,11 +4,6 @@
 
 package main
 
-import (
-	"go/ast"
-	"slices"
-)
-
 func init() {
 	register(netipv6zoneFix)
 }

@@ -16,56 +11,9 @@ func init() {
 var netipv6zoneFix = fix{
 	name: "netipv6zone",
 	date: "2012-11-26",
-	f:    netipv6zone,
-	desc: `Adapt element key to IPAddr, UDPAddr or TCPAddr composite literals.
+	f:    noop,
+	desc: `Adapt element key to IPAddr, UDPAddr or TCPAddr composite literals (removed).
 
 https://codereview.appspot.com/6849045/
 `,
 }
-
-func netipv6zone(f *ast.File) bool {
-	if !imports(f, "net") {
-		return false
-	}
-
-	fixed := false
-	walk(f, func(n any) {
-		cl, ok := n.(*ast.CompositeLit)
-		if !ok {
-			return
-		}
-		se, ok := cl.Type.(*ast.SelectorExpr)
-		if !ok {
-			return
-		}
-		if !isTopName(se.X, "net") || se.Sel == nil {
-			return
-		}
-		switch ss := se.Sel.String(); ss {
-		case "IPAddr", "UDPAddr", "TCPAddr":
-			for i, e := range cl.Elts {
-				if _, ok := e.(*ast.KeyValueExpr); ok {
-					break
-				}
-				switch i {
-				case 0:
-					cl.Elts[i] = &ast.KeyValueExpr{
-						Key:   ast.NewIdent("IP"),
-						Value: e,
-					}
-				case 1:
-					if elit, ok := e.(*ast.BasicLit); ok && elit.Value == "0" {
-						cl.Elts = slices.Delete(cl.Elts, i, i+1)
-					} else {
-						cl.Elts[i] = &ast.KeyValueExpr{
-							Key:   ast.NewIdent("Port"),
-							Value: e,
-						}
-					}
-				}
-				fixed = true
-			}
-		}
-	})
-	return fixed
-}

@@ -1,43 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-func init() {
-	addTestCases(netipv6zoneTests, netipv6zone)
-}
-
-var netipv6zoneTests = []testCase{
-	{
-		Name: "netipv6zone.0",
-		In: `package main
-
-import "net"
-
-func f() net.Addr {
-	a := &net.IPAddr{ip1}
-	sub(&net.UDPAddr{ip2, 12345})
-	c := &net.TCPAddr{IP: ip3, Port: 54321}
-	d := &net.TCPAddr{ip4, 0}
-	p := 1234
-	e := &net.TCPAddr{ip4, p}
-	return &net.TCPAddr{ip5}, nil
-}
-`,
-		Out: `package main
-
-import "net"
-
-func f() net.Addr {
-	a := &net.IPAddr{IP: ip1}
-	sub(&net.UDPAddr{IP: ip2, Port: 12345})
-	c := &net.TCPAddr{IP: ip3, Port: 54321}
-	d := &net.TCPAddr{IP: ip4}
-	p := 1234
-	e := &net.TCPAddr{IP: ip4, Port: p}
-	return &net.TCPAddr{IP: ip5}, nil
-}
-`,
-	},
-}

@@ -4,8 +4,6 @@
 
 package main
 
-import "go/ast"
-
 func init() {
 	register(printerconfigFix)
 }

@@ -13,49 +11,6 @@ func init() {
 var printerconfigFix = fix{
 	name: "printerconfig",
 	date: "2012-12-11",
-	f:    printerconfig,
-	desc: `Add element keys to Config composite literals.`,
-}
-
-func printerconfig(f *ast.File) bool {
-	if !imports(f, "go/printer") {
-		return false
-	}
-
-	fixed := false
-	walk(f, func(n any) {
-		cl, ok := n.(*ast.CompositeLit)
-		if !ok {
-			return
-		}
-		se, ok := cl.Type.(*ast.SelectorExpr)
-		if !ok {
-			return
-		}
-		if !isTopName(se.X, "printer") || se.Sel == nil {
-			return
-		}
-
-		if ss := se.Sel.String(); ss == "Config" {
-			for i, e := range cl.Elts {
-				if _, ok := e.(*ast.KeyValueExpr); ok {
-					break
-				}
-				switch i {
-				case 0:
-					cl.Elts[i] = &ast.KeyValueExpr{
-						Key:   ast.NewIdent("Mode"),
-						Value: e,
-					}
-				case 1:
-					cl.Elts[i] = &ast.KeyValueExpr{
-						Key:   ast.NewIdent("Tabwidth"),
-						Value: e,
-					}
-				}
-				fixed = true
-			}
-		}
-	})
-	return fixed
+	f:    noop,
+	desc: `Add element keys to Config composite literals (removed).`,
 }

@@ -1,37 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-func init() {
-	addTestCases(printerconfigTests, printerconfig)
-}
-
-var printerconfigTests = []testCase{
-	{
-		Name: "printerconfig.0",
-		In: `package main
-
-import "go/printer"
-
-func f() printer.Config {
-	b := printer.Config{0, 8}
-	c := &printer.Config{0}
-	d := &printer.Config{Tabwidth: 8, Mode: 0}
-	return printer.Config{0, 8}
-}
-`,
-		Out: `package main
-
-import "go/printer"
-
-func f() printer.Config {
-	b := printer.Config{Mode: 0, Tabwidth: 8}
-	c := &printer.Config{Mode: 0}
-	d := &printer.Config{Tabwidth: 8, Mode: 0}
-	return printer.Config{Mode: 0, Tabwidth: 8}
-}
-`,
-	},
-}

@@ -11,7 +11,7 @@ require (
 	golang.org/x/sys v0.35.0
 	golang.org/x/telemetry v0.0.0-20250807160809-1a19826ec488
 	golang.org/x/term v0.34.0
-	golang.org/x/tools v0.36.1-0.20250808220315-8866876b956f
+	golang.org/x/tools v0.36.1-0.20250904192731-a09a2fba1c08
 )
 
 require (

@@ -22,7 +22,7 @@ golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
 golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
 golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
 golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
-golang.org/x/tools v0.36.1-0.20250808220315-8866876b956f h1:9m2Iptt9ZZU5llKDJy1XUl5d13PN1ZYV16KwOvE6jOw=
-golang.org/x/tools v0.36.1-0.20250808220315-8866876b956f/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
+golang.org/x/tools v0.36.1-0.20250904192731-a09a2fba1c08 h1:KS/PXsrK6W9NdlNu8iuCiNb7KM8UFwsh8g1BUjJ9rww=
+golang.org/x/tools v0.36.1-0.20250904192731-a09a2fba1c08/go.mod h1:n+8pplxVZfXnmHBxWsfPnQRJ5vWroQDk+U2MFpjwtFY=
 rsc.io/markdown v0.0.0-20240306144322-0bf8f97ee8ef h1:mqLYrXCXYEZOop9/Dbo6RPX11539nwiCNBb1icVPmw8=
 rsc.io/markdown v0.0.0-20240306144322-0bf8f97ee8ef/go.mod h1:8xcPgWmwlZONN1D9bjxtHEjrUtSEa3fakVF8iaewYKQ=

@@ -988,6 +988,15 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) {
 		if len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 && cfg.BuildCoverPkg == nil {
 			p.Internal.Cover.GenMeta = true
 		}
+
+		// Set coverage mode before building actions because it needs to be set
+		// before the first package build action for the package under test is
+		// created and cached, so that we can create the coverage action for it.
+		if cfg.BuildCover {
+			if p.Internal.Cover.GenMeta {
+				p.Internal.Cover.Mode = cfg.BuildCoverMode
+			}
+		}
 	}
 }
 

@@ -1116,11 +1125,6 @@ var windowsBadWords = []string{
 
 func builderTest(b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts, p *load.Package, imported bool, writeCoverMetaAct *work.Action) (buildAction, runAction, printAction *work.Action, perr *load.Package, err error) {
 	if len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 {
-		if cfg.BuildCover {
-			if p.Internal.Cover.GenMeta {
-				p.Internal.Cover.Mode = cfg.BuildCoverMode
-			}
-		}
 		build := b.CompileAction(work.ModeBuild, work.ModeBuild, p)
 		run := &work.Action{
 			Mode: "test run",

@@ -1188,7 +1192,9 @@ func builderTest(b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts,
 
 	testBinary := testBinaryName(p)
 
-	testDir := b.NewObjdir()
+	// Set testdir to the compile action's objdir,
+	// so that the default file path stripping applies to _testmain.go.
+	testDir := b.CompileAction(work.ModeBuild, work.ModeBuild, pmain).Objdir
 	if err := b.BackgroundShell().Mkdir(testDir); err != nil {
 		return nil, nil, nil, nil, err
 	}

@@ -1209,10 +1215,6 @@ func builderTest(b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts,
 		}
 	}
 
-	// Set compile objdir to testDir we've already created,
-	// so that the default file path stripping applies to _testmain.go.
-	b.CompileAction(work.ModeBuild, work.ModeBuild, pmain).Objdir = testDir
-
 	a := b.LinkAction(work.ModeBuild, work.ModeBuild, pmain)
 	a.Target = testDir + testBinary + cfg.ExeSuffix
 	if cfg.Goos == "windows" {

@@ -88,6 +88,8 @@ type Action struct {
 	TestOutput *bytes.Buffer // test output buffer
 	Args       []string      // additional args for runProgram
 
+	Provider any // Additional information to be passed to successive actions. Similar to a Bazel provider.
+
 	triggers []*Action // inverse of deps
 
 	buggyInstall bool // is this a buggy install (see -linkshared)?

@@ -448,26 +450,9 @@ func (b *Builder) AutoAction(mode, depMode BuildMode, p *load.Package) *Action {
 }
 
 // buildActor implements the Actor interface for package build
-// actions. For most package builds this simply means invoking th
-// *Builder.build method; in the case of "go test -cover" for
-// a package with no test files, we stores some additional state
-// information in the build actor to help with reporting.
-type buildActor struct {
-	// name of static meta-data file fragment emitted by the cover
-	// tool as part of the package build action, for selected
-	// "go test -cover" runs.
-	covMetaFileName string
-}
-
-// newBuildActor returns a new buildActor object, setting up the
-// covMetaFileName field if 'genCoverMeta' flag is set.
-func newBuildActor(p *load.Package, genCoverMeta bool) *buildActor {
-	ba := &buildActor{}
-	if genCoverMeta {
-		ba.covMetaFileName = covcmd.MetaFileForPackage(p.ImportPath)
-	}
-	return ba
-}
+// actions. For most package builds this simply means invoking the
+// *Builder.build method.
+type buildActor struct{}
 
 func (ba *buildActor) Act(b *Builder, ctx context.Context, a *Action) error {
 	return b.build(ctx, a)

@@ -536,6 +521,63 @@ func (p *pgoActor) Act(b *Builder, ctx context.Context, a *Action) error {
 	return nil
 }
 
+type checkCacheProvider struct {
+	need uint32 // What work do successive actions within this package's build need to do? Combination of need bits used in build actions.
+}
+
+// The actor to check the cache to determine what work needs to be done for the action.
+// It checks the cache and sets the need bits depending on the build mode and what's available
+// in the cache, so the cover and compile actions know what to do.
+// Currently, we don't cache the outputs of the individual actions composing the build
+// for a single package (such as the output of the cover actor) separately from the
+// output of the final build, but if we start doing so, we could schedule the run cgo
+// and cgo compile actions earlier because they wouldn't depend on the builds of the
+// dependencies of the package they belong to.
+type checkCacheActor struct {
+	covMetaFileName string
+	buildAction     *Action
+}
+
+func (cca *checkCacheActor) Act(b *Builder, ctx context.Context, a *Action) error {
+	buildAction := cca.buildAction
+	if buildAction.Mode == "build-install" {
+		// (*Builder).installAction can rewrite the build action with its install action,
+		// making the true build action its dependency. Fetch the build action in that case.
+		buildAction = buildAction.Deps[0]
+	}
+	pr, err := b.checkCacheForBuild(a, buildAction, cca.covMetaFileName)
+	if err != nil {
+		return err
+	}
+	a.Provider = pr
+	return nil
+}
+
+type coverProvider struct {
+	goSources, cgoSources []string // The go and cgo sources generated by the cover tool, which should be used instead of the raw sources on the package.
+}
+
+// The actor to run the cover tool to produce instrumented source files for cover
+// builds. In the case of a package with no test files, we store some additional state
+// information in the build actor to help with reporting.
+type coverActor struct {
+	// name of static meta-data file fragment emitted by the cover
+	// tool as part of the package cover action, for selected
+	// "go test -cover" runs.
+	covMetaFileName string
+
+	buildAction *Action
+}
+
+func (ca *coverActor) Act(b *Builder, ctx context.Context, a *Action) error {
+	pr, err := b.runCover(a, ca.buildAction, a.Objdir, a.Package.GoFiles, a.Package.CgoFiles)
+	if err != nil {
+		return err
+	}
+	a.Provider = pr
+	return nil
+}
+
 // CompileAction returns the action for compiling and possibly installing
 // (according to mode) the given package. The resulting action is only
 // for building packages (archives), never for linking executables.

@@ -559,7 +601,7 @@
 	a := &Action{
 		Mode:    "build",
 		Package: p,
-		Actor:   newBuildActor(p, p.Internal.Cover.GenMeta),
+		Actor:   &buildActor{},
 		Objdir:  b.NewObjdir(),
 	}
 

@@ -602,6 +644,39 @@
 		}
 	}
 
+	// Determine the covmeta file name.
+	var covMetaFileName string
+	if p.Internal.Cover.GenMeta {
+		covMetaFileName = covcmd.MetaFileForPackage(p.ImportPath)
+	}
+
+	// Create a cache action.
+	cacheAction := &Action{
+		Mode:    "build check cache",
+		Package: p,
+		Actor:   &checkCacheActor{buildAction: a, covMetaFileName: covMetaFileName},
+		Objdir:  a.Objdir,
+		Deps:    a.Deps, // Need outputs of dependency build actions to generate action id.
+	}
+	a.Deps = append(a.Deps, cacheAction)
+
+	// Create a cover action if we need to instrument the code for coverage.
+	// The cover action always runs in the same go build invocation as the build,
+	// and is not cached separately, so it can use the same objdir.
+	var coverAction *Action
+	if p.Internal.Cover.Mode != "" {
+		coverAction = b.cacheAction("cover", p, func() *Action {
+			return &Action{
+				Mode:    "cover",
+				Package: p,
+				Actor:   &coverActor{buildAction: a, covMetaFileName: covMetaFileName},
+				Objdir:  a.Objdir,
+				Deps:    []*Action{cacheAction},
+			}
+		})
+		a.Deps = append(a.Deps, coverAction)
+	}
+
 	return a
 })

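The new Provider field turns the action graph into a simple data-flow channel: a consumer walks its Deps and type-switches on each dependency's Provider, as checkCacheActor's and coverActor's consumers do in the exec.go changes below. A standalone sketch of the pattern, using toy types rather than the cmd/go ones:

package main

import "fmt"

// Toy versions of the Action/Provider hand-off; illustrative names only.
type action struct {
	deps     []*action
	provider any
}

type needProvider struct{ need uint32 }

// findNeed scans deps for a needProvider, mirroring the lookup loops
// in the real runCover and build functions. ok is false if no
// dependency produced one.
func findNeed(a *action) (need uint32, ok bool) {
	for _, dep := range a.deps {
		if pr, isNeed := dep.provider.(*needProvider); isNeed {
			return pr.need, true
		}
	}
	return 0, false
}

func main() {
	cache := &action{provider: &needProvider{need: 0b101}}
	build := &action{deps: []*action{cache}}
	if need, ok := findNeed(build); ok {
		fmt.Printf("need bits: %03b\n", need)
	}
}
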
@@ -473,10 +473,12 @@ const (
 	needStale
 )
 
-// build is the action for building a single package.
-// Note that any new influence on this logic must be reported in b.buildActionID above as well.
-func (b *Builder) build(ctx context.Context, a *Action) (err error) {
-	p := a.Package
+// checkCacheForBuild checks the cache for the outputs of the buildAction to determine
+// what work needs to be done by it and the actions preceding it. a is the action
+// currently being run, which has an actor of type *checkCacheActor and is a dependency
+// of the buildAction.
+func (b *Builder) checkCacheForBuild(a, buildAction *Action, covMetaFileName string) (_ *checkCacheProvider, err error) {
+	p := buildAction.Package
 	sh := b.Shell(a)
 
 	bit := func(x uint32, b bool) uint32 {

@@ -488,28 +490,31 @@
 
 	cachedBuild := false
 	needCovMeta := p.Internal.Cover.GenMeta
-	need := bit(needBuild, !b.IsCmdList && a.needBuild || b.NeedExport) |
-		bit(needCgoHdr, b.needCgoHdr(a)) |
-		bit(needVet, a.needVet) |
+	need := bit(needBuild, !b.IsCmdList && buildAction.needBuild || b.NeedExport) |
+		bit(needCgoHdr, b.needCgoHdr(buildAction)) |
+		bit(needVet, buildAction.needVet) |
 		bit(needCovMetaFile, needCovMeta) |
 		bit(needCompiledGoFiles, b.NeedCompiledGoFiles)
 
 	if !p.BinaryOnly {
-		if b.useCache(a, b.buildActionID(a), p.Target, need&needBuild != 0) {
+		// We pass 'a' (this checkCacheAction) to buildActionID so that we use its dependencies,
+		// which are the actual package dependencies, rather than the buildAction's dependencies
+		// which also includes this action and the cover action.
+		if b.useCache(buildAction, b.buildActionID(a), p.Target, need&needBuild != 0) {
 			// We found the main output in the cache.
 			// If we don't need any other outputs, we can stop.
 			// Otherwise, we need to write files to a.Objdir (needVet, needCgoHdr).
 			// Remember that we might have them in cache
 			// and check again after we create a.Objdir.
 			cachedBuild = true
-			a.output = []byte{} // start saving output in case we miss any cache results
+			buildAction.output = []byte{} // start saving output in case we miss any cache results
 			need &^= needBuild
 			if b.NeedExport {
-				p.Export = a.built
-				p.BuildID = a.buildID
+				p.Export = buildAction.built
+				p.BuildID = buildAction.buildID
 			}
 			if need&needCompiledGoFiles != 0 {
-				if err := b.loadCachedCompiledGoFiles(a); err == nil {
+				if err := b.loadCachedCompiledGoFiles(buildAction); err == nil {
 					need &^= needCompiledGoFiles
 				}
 			}

@@ -518,13 +523,13 @@
 	// Source files might be cached, even if the full action is not
 	// (e.g., go list -compiled -find).
 	if !cachedBuild && need&needCompiledGoFiles != 0 {
-		if err := b.loadCachedCompiledGoFiles(a); err == nil {
+		if err := b.loadCachedCompiledGoFiles(buildAction); err == nil {
 			need &^= needCompiledGoFiles
 		}
 	}
 
 	if need == 0 {
-		return nil
+		return &checkCacheProvider{need: need}, nil
 	}
 	defer b.flushOutput(a)
 }

@@ -534,6 +539,175 @@
 			p.Error = &load.PackageError{Err: err}
 		}
 	}()
 
+	if p.Error != nil {
+		// Don't try to build anything for packages with errors. There may be a
+		// problem with the inputs that makes the package unsafe to build.
+		return nil, p.Error
+	}
+
+	// TODO(matloob): return early for binary-only packages so that we don't need to indent
+	// the core of this function in the if !p.BinaryOnly block above.
+	if p.BinaryOnly {
+		p.Stale = true
+		p.StaleReason = "binary-only packages are no longer supported"
+		if b.IsCmdList {
+			return &checkCacheProvider{need: 0}, nil
+		}
+		return nil, errors.New("binary-only packages are no longer supported")
+	}
+
+	if p.Module != nil && !allowedVersion(p.Module.GoVersion) {
+		return nil, errors.New("module requires Go " + p.Module.GoVersion + " or later")
+	}
+
+	if err := b.checkDirectives(buildAction); err != nil {
+		return nil, err
+	}
+
+	if err := sh.Mkdir(buildAction.Objdir); err != nil {
+		return nil, err
+	}
+
+	// Load cached cgo header, but only if we're skipping the main build (cachedBuild==true).
+	if cachedBuild && need&needCgoHdr != 0 {
+		if err := b.loadCachedCgoHdr(buildAction); err == nil {
+			need &^= needCgoHdr
+		}
+	}
+
+	// Load cached coverage meta-data file fragment, but only if we're
+	// skipping the main build (cachedBuild==true).
+	if cachedBuild && need&needCovMetaFile != 0 {
+		if err := b.loadCachedObjdirFile(buildAction, cache.Default(), covMetaFileName); err == nil {
+			need &^= needCovMetaFile
+		}
+	}
+
+	// Load cached vet config, but only if that's all we have left
+	// (need == needVet, not testing just the one bit).
+	// If we are going to do a full build anyway,
+	// we're going to regenerate the files in the build action anyway.
+	if need == needVet {
+		if err := b.loadCachedVet(buildAction); err == nil {
+			need &^= needVet
+		}
+	}
+
+	return &checkCacheProvider{need: need}, nil
+}
+
+func (b *Builder) runCover(a, buildAction *Action, objdir string, gofiles, cgofiles []string) (*coverProvider, error) {
+	p := a.Package
+	sh := b.Shell(a)
+
+	var cacheProvider *checkCacheProvider
+	for _, dep := range a.Deps {
+		if pr, ok := dep.Provider.(*checkCacheProvider); ok {
+			cacheProvider = pr
+		}
+	}
+	if cacheProvider == nil {
+		base.Fatalf("internal error: could not find checkCacheProvider")
+	}
+	need := cacheProvider.need
+
+	if need == 0 {
+		return nil, nil
+	}
+
+	if err := sh.Mkdir(a.Objdir); err != nil {
+		return nil, err
+	}
+
+	gofiles = slices.Clone(gofiles)
+	cgofiles = slices.Clone(cgofiles)
+
+	outfiles := []string{}
+	infiles := []string{}
+	for i, file := range str.StringList(gofiles, cgofiles) {
+		if base.IsTestFile(file) {
+			continue // Not covering this file.
+		}
+
+		var sourceFile string
+		var coverFile string
+		if base, found := strings.CutSuffix(file, ".cgo1.go"); found {
+			// cgo files have absolute paths
+			base = filepath.Base(base)
+			sourceFile = file
+			coverFile = objdir + base + ".cgo1.go"
+		} else {
+			sourceFile = filepath.Join(p.Dir, file)
+			coverFile = objdir + file
+		}
+		coverFile = strings.TrimSuffix(coverFile, ".go") + ".cover.go"
+		infiles = append(infiles, sourceFile)
+		outfiles = append(outfiles, coverFile)
+		if i < len(gofiles) {
+			gofiles[i] = coverFile
+		} else {
+			cgofiles[i-len(gofiles)] = coverFile
+		}
+	}
+
+	if len(infiles) != 0 {
+		// Coverage instrumentation creates new top level
+		// variables in the target package for things like
+		// meta-data containers, counter vars, etc. To avoid
+		// collisions with user variables, suffix the var name
+		// with 12 hex digits from the SHA-256 hash of the
+		// import path. Choice of 12 digits is historical/arbitrary,
+		// we just need enough of the hash to avoid accidents,
+		// as opposed to precluding determined attempts by
+		// users to break things.
+		sum := sha256.Sum256([]byte(a.Package.ImportPath))
+		coverVar := fmt.Sprintf("goCover_%x_", sum[:6])
+		mode := a.Package.Internal.Cover.Mode
+		if mode == "" {
+			panic("covermode should be set at this point")
+		}
+		if newoutfiles, err := b.cover(a, infiles, outfiles, coverVar, mode); err != nil {
+			return nil, err
+		} else {
+			outfiles = newoutfiles
+			gofiles = append([]string{newoutfiles[0]}, gofiles...)
+		}
+		if ca, ok := a.Actor.(*coverActor); ok && ca.covMetaFileName != "" {
+			b.cacheObjdirFile(buildAction, cache.Default(), ca.covMetaFileName)
+		}
+	}
+	return &coverProvider{gofiles, cgofiles}, nil
+}
+
+// build is the action for building a single package.
+// Note that any new influence on this logic must be reported in b.buildActionID above as well.
+func (b *Builder) build(ctx context.Context, a *Action) (err error) {
+	p := a.Package
+	sh := b.Shell(a)
+
+	var cacheProvider *checkCacheProvider
+	var coverPr *coverProvider
+	for _, dep := range a.Deps {
+		switch pr := dep.Provider.(type) {
+		case *coverProvider:
+			coverPr = pr
+		case *checkCacheProvider:
+			cacheProvider = pr
+		}
+	}
+	if cacheProvider == nil {
+		base.Fatalf("internal error: could not find checkCacheProvider")
+	}
+
+	need := cacheProvider.need
+	need &^= needCovMetaFile // handled by cover action
+
+	if need == 0 {
+		return
+	}
+	defer b.flushOutput(a)
+
 	if cfg.BuildN {
 		// In -n mode, print a banner between packages.
 		// The banner is five lines so that when changes to

@@ -547,63 +721,8 @@
 		sh.Printf("%s\n", p.ImportPath)
 	}
 
-	if p.Error != nil {
-		// Don't try to build anything for packages with errors. There may be a
-		// problem with the inputs that makes the package unsafe to build.
-		return p.Error
-	}
-
-	if p.BinaryOnly {
-		p.Stale = true
-		p.StaleReason = "binary-only packages are no longer supported"
-		if b.IsCmdList {
-			return nil
-		}
-		return errors.New("binary-only packages are no longer supported")
-	}
-
-	if p.Module != nil && !allowedVersion(p.Module.GoVersion) {
-		return errors.New("module requires Go " + p.Module.GoVersion + " or later")
-	}
-
-	if err := b.checkDirectives(a); err != nil {
-		return err
-	}
-
-	if err := sh.Mkdir(a.Objdir); err != nil {
-		return err
-	}
 	objdir := a.Objdir
 
-	// Load cached cgo header, but only if we're skipping the main build (cachedBuild==true).
-	if cachedBuild && need&needCgoHdr != 0 {
-		if err := b.loadCachedCgoHdr(a); err == nil {
-			need &^= needCgoHdr
-		}
-	}
-
-	// Load cached coverage meta-data file fragment, but only if we're
-	// skipping the main build (cachedBuild==true).
-	if cachedBuild && need&needCovMetaFile != 0 {
-		bact := a.Actor.(*buildActor)
-		if err := b.loadCachedObjdirFile(a, cache.Default(), bact.covMetaFileName); err == nil {
-			need &^= needCovMetaFile
-		}
-	}
-
-	// Load cached vet config, but only if that's all we have left
-	// (need == needVet, not testing just the one bit).
-	// If we are going to do a full build anyway,
-	// we're going to regenerate the files below anyway.
-	if need == needVet {
-		if err := b.loadCachedVet(a); err == nil {
-			need &^= needVet
-		}
-	}
-	if need == 0 {
-		return nil
-	}
-
 	if err := AllowInstall(a); err != nil {
 		return err
 	}

@@ -658,60 +777,8 @@ OverlayLoop:
 
 	// If we're doing coverage, preprocess the .go files and put them in the work directory
 	if p.Internal.Cover.Mode != "" {
-		outfiles := []string{}
-		infiles := []string{}
-		for i, file := range str.StringList(gofiles, cgofiles) {
-			if base.IsTestFile(file) {
-				continue // Not covering this file.
-			}
-
-			var sourceFile string
-			var coverFile string
-			if base, found := strings.CutSuffix(file, ".cgo1.go"); found {
-				// cgo files have absolute paths
-				base = filepath.Base(base)
-				sourceFile = file
-				coverFile = objdir + base + ".cgo1.go"
-			} else {
-				sourceFile = filepath.Join(p.Dir, file)
-				coverFile = objdir + file
-			}
-			coverFile = strings.TrimSuffix(coverFile, ".go") + ".cover.go"
-			infiles = append(infiles, sourceFile)
-			outfiles = append(outfiles, coverFile)
-			if i < len(gofiles) {
-				gofiles[i] = coverFile
-			} else {
-				cgofiles[i-len(gofiles)] = coverFile
-			}
-		}
-
-		if len(infiles) != 0 {
-			// Coverage instrumentation creates new top level
-			// variables in the target package for things like
-			// meta-data containers, counter vars, etc. To avoid
-			// collisions with user variables, suffix the var name
-			// with 12 hex digits from the SHA-256 hash of the
-			// import path. Choice of 12 digits is historical/arbitrary,
-			// we just need enough of the hash to avoid accidents,
-			// as opposed to precluding determined attempts by
-			// users to break things.
-			sum := sha256.Sum256([]byte(a.Package.ImportPath))
-			coverVar := fmt.Sprintf("goCover_%x_", sum[:6])
-			mode := a.Package.Internal.Cover.Mode
-			if mode == "" {
-				panic("covermode should be set at this point")
-			}
-			if newoutfiles, err := b.cover(a, infiles, outfiles, coverVar, mode); err != nil {
-				return err
-			} else {
-				outfiles = newoutfiles
-				gofiles = append([]string{newoutfiles[0]}, gofiles...)
-			}
-			if ba, ok := a.Actor.(*buildActor); ok && ba.covMetaFileName != "" {
-				b.cacheObjdirFile(a, cache.Default(), ba.covMetaFileName)
-			}
-		}
+		gofiles = coverPr.goSources
+		cgofiles = coverPr.cgoSources
 	}
 
 	// Run SWIG on each .swig and .swigcxx file.

@@ -1209,7 +1276,7 @@ func buildVetConfig(a *Action, srcfiles []string) {
 
 	for _, a1 := range a.Deps {
 		p1 := a1.Package
-		if p1 == nil || p1.ImportPath == "" {
+		if p1 == nil || p1.ImportPath == "" || p1 == a.Package {
 			continue
 		}
 		// Add import mapping if needed

@@ -1951,8 +2018,8 @@ func (b *Builder) writeCoverPkgInputs(a *Action, pconfigfile string, covoutputsf
 		OutConfig: p.Internal.Cover.Cfg,
 		Local:     p.Internal.Local,
 	}
-	if ba, ok := a.Actor.(*buildActor); ok && ba.covMetaFileName != "" {
-		pcfg.EmitMetaFile = a.Objdir + ba.covMetaFileName
+	if ca, ok := a.Actor.(*coverActor); ok && ca.covMetaFileName != "" {
+		pcfg.EmitMetaFile = a.Objdir + ca.covMetaFileName
 	}
 	if a.Package.Module != nil {
 		pcfg.ModulePath = a.Package.Module.Path

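The coverage variable-naming scheme described in runCover's comment above is easy to check in isolation: a fixed prefix plus 12 hex digits of the SHA-256 of the import path. A small illustrative sketch of just that naming step:

package main

import (
	"crypto/sha256"
	"fmt"
)

// coverVarFor reproduces the naming scheme from the comment above:
// "goCover_" plus 12 hex digits (6 bytes) of the SHA-256 of the
// import path, to keep instrumentation vars from colliding with
// user-declared identifiers. Illustrative only.
func coverVarFor(importPath string) string {
	sum := sha256.Sum256([]byte(importPath))
	return fmt.Sprintf("goCover_%x_", sum[:6])
}

func main() {
	fmt.Println(coverVarFor("cmd/go/internal/work"))
}
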
@@ -132,11 +132,47 @@ func (sh *Shell) moveOrCopyFile(dst, src string, perm fs.FileMode, force bool) e
 		return sh.CopyFile(dst, src, perm, force)
 	}
 
-	if err := sh.move(src, dst, perm); err == nil {
-		if cfg.BuildX {
-			sh.ShowCmd("", "mv %s %s", src, dst)
+	// On Windows, always copy the file, so that we respect the NTFS
+	// permissions of the parent folder. https://golang.org/issue/22343.
+	// What matters here is not cfg.Goos (the system we are building
+	// for) but runtime.GOOS (the system we are building on).
+	if runtime.GOOS == "windows" {
+		return sh.CopyFile(dst, src, perm, force)
+	}
+
+	// If the destination directory has the group sticky bit set,
+	// we have to copy the file to retain the correct permissions.
+	// https://golang.org/issue/18878
+	if fi, err := os.Stat(filepath.Dir(dst)); err == nil {
+		if fi.IsDir() && (fi.Mode()&fs.ModeSetgid) != 0 {
+			return sh.CopyFile(dst, src, perm, force)
+		}
+	}
+
+	// The perm argument is meant to be adjusted according to umask,
+	// but we don't know what the umask is.
+	// Create a dummy file to find out.
+	// This avoids build tags and works even on systems like Plan 9
+	// where the file mask computation incorporates other information.
+	mode := perm
+	f, err := os.OpenFile(filepath.Clean(dst)+"-go-tmp-umask", os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm)
+	if err == nil {
+		fi, err := f.Stat()
+		if err == nil {
+			mode = fi.Mode() & 0777
+		}
+		name := f.Name()
+		f.Close()
+		os.Remove(name)
+	}
+
+	if err := os.Chmod(src, mode); err == nil {
+		if err := os.Rename(src, dst); err == nil {
+			if cfg.BuildX {
+				sh.ShowCmd("", "mv %s %s", src, dst)
+			}
+			return nil
 		}
-		return nil
 	}
 
 	return sh.CopyFile(dst, src, perm, force)

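The umask probe restored above is a self-contained trick: since the effective mask for new files can't be queried portably, create a throwaway file next to the destination and stat it. A runnable sketch of just that probe, under the assumption that a sibling "-go-tmp-umask" file is acceptable (the probe path here is illustrative):

package main

import (
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
)

// effectiveMode reports the permission bits a new file would actually
// receive for the requested perm, by creating and statting a throwaway
// probe file next to dst, mirroring the moveOrCopyFile probe above.
// If the probe cannot be created, it falls back to perm unchanged.
func effectiveMode(dst string, perm fs.FileMode) fs.FileMode {
	mode := perm
	f, err := os.OpenFile(filepath.Clean(dst)+"-go-tmp-umask", os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm)
	if err == nil {
		if fi, err := f.Stat(); err == nil {
			mode = fi.Mode() & 0777
		}
		name := f.Name()
		f.Close()
		os.Remove(name)
	}
	return mode
}

func main() {
	dst := filepath.Join(os.TempDir(), "probe-target")
	fmt.Printf("%o\n", effectiveMode(dst, 0777)) // e.g. 755 under umask 022
}
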
@@ -1,49 +0,0 @@
-// Copyright 2025 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !windows
-
-package work
-
-import (
-	"errors"
-	"io/fs"
-	"os"
-	"path/filepath"
-)
-
-// move moves a file from src to dst setting the permissions
-// on the destination file to inherit the permissions from the
-// destination parent directory.
-func (sh *Shell) move(src, dst string, perm fs.FileMode) error {
-	// If the destination directory has the group sticky bit set,
-	// we have to copy the file to retain the correct permissions.
-	// https://golang.org/issue/18878
-	if fi, err := os.Stat(filepath.Dir(dst)); err == nil {
-		if fi.IsDir() && (fi.Mode()&fs.ModeSetgid) != 0 {
-			return errors.ErrUnsupported
-		}
-	}
-	// The perm argument is meant to be adjusted according to umask,
-	// but we don't know what the umask is.
-	// Create a dummy file to find out.
-	// This works even on systems like Plan 9 where the
-	// file mask computation incorporates other information.
-	mode := perm
-	f, err := os.OpenFile(filepath.Clean(dst)+"-go-tmp-umask", os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm)
-	if err == nil {
-		fi, err := f.Stat()
-		if err == nil {
-			mode = fi.Mode() & 0777
-		}
-		name := f.Name()
-		f.Close()
-		os.Remove(name)
-	}
-
-	if err := os.Chmod(src, mode); err != nil {
-		return err
-	}
-	return os.Rename(src, dst)
-}

@@ -1,37 +0,0 @@
-// Copyright 2025 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package work
-
-import (
-	"internal/syscall/windows"
-	"io/fs"
-	"os"
-	"unsafe"
-)
-
-// move moves a file from src to dst, setting the security information
-// on the destination file to inherit the permissions from the
-// destination parent directory.
-func (sh *Shell) move(src, dst string, perm fs.FileMode) (err error) {
-	if err := os.Rename(src, dst); err != nil {
-		return err
-	}
-	defer func() {
-		if err != nil {
-			os.Remove(dst) // clean up if we failed to set the mode or security info
-		}
-	}()
-	if err := os.Chmod(dst, perm); err != nil {
-		return err
-	}
-	// We need to respect the ACL permissions of the destination parent folder.
-	// https://go.dev/issue/22343.
-	var acl windows.ACL
-	if err := windows.InitializeAcl(&acl, uint32(unsafe.Sizeof(acl)), windows.ACL_REVISION); err != nil {
-		return err
-	}
-	secInfo := windows.DACL_SECURITY_INFORMATION | windows.UNPROTECTED_DACL_SECURITY_INFORMATION
-	return windows.SetNamedSecurityInfo(dst, windows.SE_FILE_OBJECT, secInfo, nil, nil, &acl, nil)
-}

@@ -1,12 +0,0 @@
-golang.org/toolchain@v0.0.1-go1.999testmod.windows-arm
-
--- .mod --
-module golang.org/toolchain
--- .info --
-{"Version":"v0.0.1-go1.999testmod.windows-arm"}
--- go.mod --
-module golang.org/toolchain
--- bin/go.bat --
-@echo go1.999testmod here!
--- pkg/tool/fake --
--- lib/wasm/go_js_wasm_exec --

@@ -225,8 +225,6 @@
 	REGZERO = REG_R0  // set to zero
 	REGLINK = REG_R1
 	REGSP   = REG_R3
-	REGRT1  = REG_R20 // reserved for runtime, duffzero and duffcopy
-	REGRT2  = REG_R21 // reserved for runtime, duffcopy
 	REGCTXT = REG_R29 // context for closures
 	REGG    = REG_R22 // G in loong64
 	REGTMP  = REG_R30 // used by the assembler

@@ -567,6 +565,9 @@
 	AMOVVF
 	AMOVVD
 
+	// 2.2.1.2
+	AADDV16
+
 	// 2.2.1.3
 	AALSLW
 	AALSLWU

@@ -125,6 +125,7 @@ var Anames = []string{
 	"MOVDV",
 	"MOVVF",
 	"MOVVD",
+	"ADDV16",
 	"ALSLW",
 	"ALSLWU",
 	"ALSLV",

@@ -267,6 +267,9 @@ var optab = []Optab{
 	{AADDV, C_U12CON, C_REG, C_NONE, C_REG, C_NONE, 10, 8, 0, 0},
 	{AADDV, C_U12CON, C_NONE, C_NONE, C_REG, C_NONE, 10, 8, 0, 0},
 
+	{AADDV16, C_32CON, C_REG, C_NONE, C_REG, C_NONE, 4, 4, 0, 0},
+	{AADDV16, C_32CON, C_NONE, C_NONE, C_REG, C_NONE, 4, 4, 0, 0},
+
 	{AAND, C_UU12CON, C_REG, C_NONE, C_REG, C_NONE, 4, 4, 0, 0},
 	{AAND, C_UU12CON, C_NONE, C_NONE, C_REG, C_NONE, 4, 4, 0, 0},
 	{AAND, C_S12CON, C_REG, C_NONE, C_REG, C_NONE, 10, 8, 0, 0},

@@ -440,8 +443,6 @@ var optab = []Optab{
 	{obj.ANOP, C_DCON, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0}, // nop variants, see #40689
 	{obj.ANOP, C_REG, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0},
 	{obj.ANOP, C_FREG, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0},
-	{obj.ADUFFZERO, C_NONE, C_NONE, C_NONE, C_BRAN, C_NONE, 11, 4, 0, 0}, // same as AJMP
-	{obj.ADUFFCOPY, C_NONE, C_NONE, C_NONE, C_BRAN, C_NONE, 11, 4, 0, 0}, // same as AJMP
 }
 
 var atomicInst = map[obj.As]uint32{

@@ -1522,13 +1523,12 @@ func buildop(ctxt *obj.Link) {
 		APRELD,
 		APRELDX,
 		AFSEL,
+		AADDV16,
 		obj.ANOP,
 		obj.ATEXT,
 		obj.AFUNCDATA,
 		obj.APCALIGN,
-		obj.APCDATA,
-		obj.ADUFFZERO,
-		obj.ADUFFCOPY:
+		obj.APCDATA:
 		break
 
 	case ARDTIMELW:

@@ -1983,6 +1983,18 @@ func OP_12IRR(op uint32, i uint32, r2 uint32, r3 uint32) uint32 {
 	return op | (i&0xFFF)<<10 | (r2&0x1F)<<5 | (r3&0x1F)<<0
 }
 
+func OP_11IRR(op uint32, i uint32, r2 uint32, r3 uint32) uint32 {
+	return op | (i&0x7FF)<<10 | (r2&0x1F)<<5 | (r3&0x1F)<<0
+}
+
+func OP_10IRR(op uint32, i uint32, r2 uint32, r3 uint32) uint32 {
+	return op | (i&0x3FF)<<10 | (r2&0x1F)<<5 | (r3&0x1F)<<0
+}
+
+func OP_9IRR(op uint32, i uint32, r2 uint32, r3 uint32) uint32 {
+	return op | (i&0x1FF)<<10 | (r2&0x1F)<<5 | (r3&0x1F)<<0
+}
+
 func OP_8IRR(op uint32, i uint32, r2 uint32, r3 uint32) uint32 {
 	return op | (i&0xFF)<<10 | (r2&0x1F)<<5 | (r3&0x1F)<<0
 }

@@ -2079,7 +2091,14 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
 		if r == 0 {
 			r = int(p.To.Reg)
 		}
-		o1 = OP_12IRR(c.opirr(p.As), uint32(v), uint32(r), uint32(p.To.Reg))
+		if p.As == AADDV16 {
+			if v&65535 != 0 {
+				c.ctxt.Diag("%v: the constant must be a multiple of 65536.\n", p)
+			}
+			o1 = OP_16IRR(c.opirr(p.As), uint32(v>>16), uint32(r), uint32(p.To.Reg))
+		} else {
+			o1 = OP_12IRR(c.opirr(p.As), uint32(v), uint32(r), uint32(p.To.Reg))
+		}
 
 	case 5: // syscall
 		v := c.regoff(&p.From)

@@ -2535,7 +2554,28 @@
 		si := c.regoff(&p.From)
 		Rj := uint32(p.From.Reg & EXT_REG_MASK)
 		Vd := uint32(p.To.Reg & EXT_REG_MASK)
-		o1 = v | uint32(si<<10) | (Rj << 5) | Vd
+		switch v & 0xc00000 {
+		case 0x800000: // [x]vldrepl.b
+			o1 = OP_12IRR(v, uint32(si), Rj, Vd)
+		case 0x400000: // [x]vldrepl.h
+			if si&1 != 0 {
+				c.ctxt.Diag("%v: offset must be a multiple of 2.\n", p)
+			}
+			o1 = OP_11IRR(v, uint32(si>>1), Rj, Vd)
+		case 0x0:
+			switch v & 0x300000 {
+			case 0x200000: // [x]vldrepl.w
+				if si&3 != 0 {
+					c.ctxt.Diag("%v: offset must be a multiple of 4.\n", p)
+				}
+				o1 = OP_10IRR(v, uint32(si>>2), Rj, Vd)
+			case 0x100000: // [x]vldrepl.d
+				if si&7 != 0 {
+					c.ctxt.Diag("%v: offset must be a multiple of 8.\n", p)
+				}
+				o1 = OP_9IRR(v, uint32(si>>3), Rj, Vd)
+			}
+		}
 
 	case 47: // preld offset(Rbase), $hint
 		offs := c.regoff(&p.From)

@@ -4004,12 +4044,12 @@ func (c *ctxt0) opirr(a obj.As) uint32 {
 		return 0x00b << 22
 	case AADDVU:
 		return 0x00b << 22
+	case AADDV16:
+		return 0x4 << 26
 
 	case AJMP:
 		return 0x14 << 26
-	case AJAL,
-		obj.ADUFFZERO,
-		obj.ADUFFCOPY:
+	case AJAL:
 		return 0x15 << 26
 
 	case AJIRL:

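The [x]vldrepl rewrite above encodes the Go-assembly byte offset after dividing it by the element size, which is why each case first checks alignment. A small model of that check-and-scale step, separate from the assembler's actual code path:

package main

import "fmt"

// scaleOffset models the offset handling in case 46 above: the byte
// offset written in Go assembly must be element-aligned, and the
// encoded immediate is the offset divided by the element size.
func scaleOffset(off, elemSize int32) (int32, error) {
	if off%elemSize != 0 {
		return 0, fmt.Errorf("offset %d must be a multiple of %d", off, elemSize)
	}
	return off / elemSize, nil
}

func main() {
	// Matches the doc.go table: VMOVQ 8(R4), V5.W4 encodes as $2.
	for _, c := range []struct{ off, size int32 }{{1, 1}, {2, 2}, {8, 4}, {8, 8}} {
		enc, err := scaleOffset(c.off, c.size)
		fmt.Println(c.off, c.size, enc, err)
	}
}
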
@@ -220,6 +220,15 @@ Note: In the following sections 3.1 to 3.6, "ui4" (4-bit unsigned int immediate)
 XVMOVQ offset(Rj), Xd.W8 | xvldrepl.w Xd, Rj, si10 | for i in range(8) : XR[xd].w[i] = load 32 bit memory data from (GR[rj]+SignExtend(si10<<2))
 XVMOVQ offset(Rj), Xd.V4 | xvldrepl.d Xd, Rj, si9  | for i in range(4) : XR[xd].d[i] = load 64 bit memory data from (GR[rj]+SignExtend(si9<<3))
+
+note: In Go assembly, for ease of understanding, the offset represents the actual address offset.
+However, during platform encoding, the offset is shifted to increase the encodable offset range, as follows:
+
+Go assembly         | platform assembly
+VMOVQ 1(R4), V5.B16 | vldrepl.b v5, r4, $1
+VMOVQ 2(R4), V5.H8  | vldrepl.h v5, r4, $1
+VMOVQ 8(R4), V5.W4  | vldrepl.w v5, r4, $2
+VMOVQ 8(R4), V5.V2  | vldrepl.d v5, r4, $1
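
In other words, the encoder divides the byte offset by the element size and rejects unaligned offsets, which is exactly what the asmout case shown earlier does with its shift-and-mask tests. A small model of that rule (the names and structure here are ours, not the assembler's):

	package main

	import "fmt"

	// encodeVldreplOffset models the offset handling above: the encoded
	// immediate is offset/elemSize, and unaligned offsets are rejected.
	func encodeVldreplOffset(offset, elemSize int32) (int32, error) {
		if offset%elemSize != 0 {
			return 0, fmt.Errorf("offset must be a multiple of %d", elemSize)
		}
		return offset / elemSize, nil
	}

	func main() {
		imm, _ := encodeVldreplOffset(8, 4) // VMOVQ 8(R4), V5.W4
		fmt.Println(imm)                    // 2, as in vldrepl.w v5, r4, $2
	}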

 # Special instruction encoding definition and description on LoongArch

 1. DBAR hint encoding for LA664(Loongson 3A6000) and later micro-architectures, paraphrased
@@ -317,6 +326,18 @@ Note: In the following sections 3.1 to 3.6, "ui4" (4-bit unsigned int immediate)
 Go assembly     | platform assembly
 MOVWP 8(R4), R5 | ldptr.w r5, r4, $2

+6. Note of the special add instruction
+Mapping between Go and platform assembly:
+Go assembly             | platform assembly
+ADDV16 si16<<16, Rj, Rd | addu16i.d rd, rj, si16
+
+note: si16 is a 16-bit immediate number, and si16<<16 is the actual operand.
+
+The addu16i.d instruction logically left-shifts the 16-bit immediate number si16 by 16 bits, then
+sign-extends it. The resulting data is added to the [63:0] bits of data in the general-purpose register
+rj, and the sum is written into the general-purpose register rd.
+The addu16i.d instruction is used in conjunction with the ldptr.w/d and stptr.w/d instructions to
+accelerate access based on the GOT table in position-independent code.
 */

 package loong64
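
The addu16i.d semantics described in the comment above reduce to one line of arithmetic: the effective addend is the sign-extended 16-bit immediate shifted left by 16. A model in Go (ours, not the assembler's code):

	package main

	import "fmt"

	// addu16i computes rd = rj + SignExtend(si16 << 16), per the
	// description in the doc comment above.
	func addu16i(rj int64, si16 int16) int64 {
		return rj + int64(si16)<<16
	}

	func main() {
		fmt.Printf("%#x\n", addu16i(0x1000, 2))  // 0x21000
		fmt.Printf("%#x\n", addu16i(0x1000, -1)) // -0xf000
	}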
@@ -17,11 +17,7 @@ import (
 func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
 	// Rewrite JMP/JAL to symbol as TYPE_BRANCH.
 	switch p.As {
-	case AJMP,
-		AJAL,
-		ARET,
-		obj.ADUFFZERO,
-		obj.ADUFFCOPY:
+	case AJMP, AJAL, ARET:
 		if p.To.Sym != nil {
 			p.To.Type = obj.TYPE_BRANCH
 		}
@@ -93,40 +89,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
 }

 func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
-	// ADUFFxxx $offset
-	// becomes
-	// MOVV runtime.duffxxx@GOT, REGTMP
-	// ADD $offset, REGTMP
-	// JAL REGTMP
-	if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO {
-		var sym *obj.LSym
-		if p.As == obj.ADUFFZERO {
-			sym = ctxt.LookupABI("runtime.duffzero", obj.ABIInternal)
-		} else {
-			sym = ctxt.LookupABI("runtime.duffcopy", obj.ABIInternal)
-		}
-		offset := p.To.Offset
-		p.As = AMOVV
-		p.From.Type = obj.TYPE_MEM
-		p.From.Sym = sym
-		p.From.Name = obj.NAME_GOTREF
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REGTMP
-		p.To.Name = obj.NAME_NONE
-		p.To.Offset = 0
-		p.To.Sym = nil
-		p1 := obj.Appendp(p, newprog)
-		p1.As = AADDV
-		p1.From.Type = obj.TYPE_CONST
-		p1.From.Offset = offset
-		p1.To.Type = obj.TYPE_REG
-		p1.To.Reg = REGTMP
-		p2 := obj.Appendp(p1, newprog)
-		p2.As = AJAL
-		p2.To.Type = obj.TYPE_MEM
-		p2.To.Reg = REGTMP
-	}
-
 	// We only care about global data: NAME_EXTERN means a global
 	// symbol in the Go sense, and p.Sym.Local is true for a few
 	// internally defined symbols.
@@ -256,9 +218,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
 			}
 		}

-		case AJAL,
-			obj.ADUFFZERO,
-			obj.ADUFFCOPY:
+		case AJAL:
 			c.cursym.Func().Text.Mark &^= LEAF
 			fallthrough

@@ -37,7 +37,7 @@ func buildop(ctxt *obj.Link) {}

 func jalToSym(ctxt *obj.Link, p *obj.Prog, lr int16) {
 	switch p.As {
-	case obj.ACALL, obj.AJMP, obj.ARET, obj.ADUFFZERO, obj.ADUFFCOPY:
+	case obj.ACALL, obj.AJMP, obj.ARET:
 	default:
 		ctxt.Diag("unexpected Prog in jalToSym: %v", p)
 		return
@@ -162,42 +162,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {

 // Rewrite p, if necessary, to access global data via the global offset table.
 func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
-	if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO {
-		// ADUFFxxx $offset
-		// becomes
-		// MOV runtime.duffxxx@GOT, REG_TMP
-		// ADD $offset, REG_TMP
-		// CALL REG_TMP
-		var sym *obj.LSym
-		if p.As == obj.ADUFFCOPY {
-			sym = ctxt.LookupABI("runtime.duffcopy", obj.ABIInternal)
-		} else {
-			sym = ctxt.LookupABI("runtime.duffzero", obj.ABIInternal)
-		}
-		offset := p.To.Offset
-		p.As = AMOV
-		p.From.Type = obj.TYPE_MEM
-		p.From.Name = obj.NAME_GOTREF
-		p.From.Sym = sym
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_TMP
-		p.To.Name = obj.NAME_NONE
-		p.To.Offset = 0
-		p.To.Sym = nil
-
-		p1 := obj.Appendp(p, newprog)
-		p1.As = AADD
-		p1.From.Type = obj.TYPE_CONST
-		p1.From.Offset = offset
-		p1.To.Type = obj.TYPE_REG
-		p1.To.Reg = REG_TMP
-
-		p2 := obj.Appendp(p1, newprog)
-		p2.As = obj.ACALL
-		p2.To.Type = obj.TYPE_REG
-		p2.To.Reg = REG_TMP
-	}
-
 	// We only care about global data: NAME_EXTERN means a global
 	// symbol in the Go sense and p.Sym.Local is true for a few internally
 	// defined symbols.
@@ -407,7 +371,7 @@ func containsCall(sym *obj.LSym) bool {
 	// CALLs are CALL or JAL(R) with link register LR.
 	for p := sym.Func().Text; p != nil; p = p.Link {
 		switch p.As {
-		case obj.ACALL, obj.ADUFFZERO, obj.ADUFFCOPY:
+		case obj.ACALL:
 			return true
 		case AJAL, AJALR:
 			if p.From.Type == obj.TYPE_REG && p.From.Reg == REG_LR {
@@ -586,7 +550,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
 			p.From.Reg = REG_SP
 		}

-	case obj.ACALL, obj.ADUFFZERO, obj.ADUFFCOPY:
+	case obj.ACALL:
 		switch p.To.Type {
 		case obj.TYPE_MEM:
 			jalToSym(ctxt, p, REG_LR)
@@ -2634,8 +2598,6 @@ var instructions = [ALAST & obj.AMask]instructionData{
 	obj.APCDATA:   {enc: pseudoOpEncoding},
 	obj.ATEXT:     {enc: pseudoOpEncoding},
 	obj.ANOP:      {enc: pseudoOpEncoding},
-	obj.ADUFFZERO: {enc: pseudoOpEncoding},
-	obj.ADUFFCOPY: {enc: pseudoOpEncoding},
 	obj.APCALIGN:  {enc: pseudoOpEncoding},
 }

@@ -59,6 +59,12 @@ package main
 func main() {}
 `

+var goSourceWithData = `
+package main
+var globalVar = 42
+func main() { println(&globalVar) }
+`
+
 // The linker used to crash if an ELF input file had multiple text sections
 // with the same name.
 func TestSectionsWithSameName(t *testing.T) {
@@ -569,3 +575,106 @@ func TestFlagR(t *testing.T) {
 		t.Errorf("executable failed to run: %v\n%s", err, out)
 	}
 }
+
+func TestFlagD(t *testing.T) {
+	// Test that using the -D flag to specify the data section address
+	// generates a working binary with data at the specified address.
+	t.Parallel()
+	testFlagD(t, "0x10000000", "", 0x10000000)
+}
+
+func TestFlagDUnaligned(t *testing.T) {
+	// Test that using the -D flag with an unaligned address errors out.
+	t.Parallel()
+	testFlagDError(t, "0x10000123", "", "invalid -D value 0x10000123")
+}
+
+func TestFlagDWithR(t *testing.T) {
+	// Test that using the -D flag with the -R flag errors on an unaligned address.
+	t.Parallel()
+	testFlagDError(t, "0x30001234", "8192", "invalid -D value 0x30001234")
+}
+
+func testFlagD(t *testing.T, dataAddr string, roundQuantum string, expectedAddr uint64) {
+	testenv.MustHaveGoBuild(t)
+	tmpdir := t.TempDir()
+	src := filepath.Join(tmpdir, "x.go")
+	if err := os.WriteFile(src, []byte(goSourceWithData), 0444); err != nil {
+		t.Fatal(err)
+	}
+	exe := filepath.Join(tmpdir, "x.exe")
+
+	// Build linker flags.
+	ldflags := "-D=" + dataAddr
+	if roundQuantum != "" {
+		ldflags += " -R=" + roundQuantum
+	}
+
+	cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-ldflags="+ldflags, "-o", exe, src)
+	if out, err := cmd.CombinedOutput(); err != nil {
+		t.Fatalf("build failed: %v, output:\n%s", err, out)
+	}
+
+	cmd = testenv.Command(t, exe)
+	if out, err := cmd.CombinedOutput(); err != nil {
+		t.Errorf("executable failed to run: %v\n%s", err, out)
+	}
+
+	ef, err := elf.Open(exe)
+	if err != nil {
+		t.Fatalf("open elf file failed: %v", err)
+	}
+	defer ef.Close()
+
+	// Find the first data-related section to verify segment placement.
+	var firstDataSectionAddr uint64
+	var found bool = false
+	for _, sec := range ef.Sections {
+		if sec.Type == elf.SHT_PROGBITS || sec.Type == elf.SHT_NOBITS {
+			// These sections are writable, allocated at runtime, but not executable.
+			isWrite := sec.Flags&elf.SHF_WRITE != 0
+			isExec := sec.Flags&elf.SHF_EXECINSTR != 0
+			isAlloc := sec.Flags&elf.SHF_ALLOC != 0
+
+			if isWrite && !isExec && isAlloc {
+				addrLower := sec.Addr < firstDataSectionAddr
+				if !found || addrLower {
+					firstDataSectionAddr = sec.Addr
+					found = true
+				}
+			}
+		}
+	}
+
+	if !found {
+		t.Fatalf("can't find any writable data sections")
+	}
+	if firstDataSectionAddr != expectedAddr {
+		t.Errorf("data section starts at 0x%x, expected 0x%x", firstDataSectionAddr, expectedAddr)
+	}
+}
+
+func testFlagDError(t *testing.T, dataAddr string, roundQuantum string, expectedError string) {
+	testenv.MustHaveGoBuild(t)
+	tmpdir := t.TempDir()
+	src := filepath.Join(tmpdir, "x.go")
+	if err := os.WriteFile(src, []byte(goSourceWithData), 0444); err != nil {
+		t.Fatal(err)
+	}
+	exe := filepath.Join(tmpdir, "x.exe")
+
+	// Build linker flags.
+	ldflags := "-D=" + dataAddr
+	if roundQuantum != "" {
+		ldflags += " -R=" + roundQuantum
+	}
+
+	cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-ldflags="+ldflags, "-o", exe, src)
+	out, err := cmd.CombinedOutput()
+	if err == nil {
+		t.Fatalf("expected build to fail with an unaligned data address, but it succeeded")
+	}
+	if !strings.Contains(string(out), expectedError) {
+		t.Errorf("expected error message to contain %q, got:\n%s", expectedError, out)
+	}
+}

@@ -2881,7 +2881,12 @@ func (ctxt *Link) address() []*sym.Segment {
 	}
 	order = append(order, &Segdata)
 	Segdata.Rwx = 06
-	Segdata.Vaddr = va
+	if *FlagDataAddr != -1 {
+		Segdata.Vaddr = uint64(*FlagDataAddr)
+		va = Segdata.Vaddr
+	} else {
+		Segdata.Vaddr = va
+	}
 	var data *sym.Section
 	var noptr *sym.Section
 	var bss *sym.Section

@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.

-//go:build darwin || (freebsd && go1.21) || linux || (netbsd && go1.25)
+//go:build darwin || freebsd || linux || (netbsd && go1.25)

 package ld

@@ -442,3 +442,25 @@ func d()
 		t.Errorf("Trampoline b-tramp0 exists unnecessarily")
 	}
 }
+
+func TestRounding(t *testing.T) {
+	testCases := []struct {
+		input    int64
+		quantum  int64
+		expected int64
+	}{
+		{0x30000000, 0x2000, 0x30000000}, // Already aligned
+		{0x30002000, 0x2000, 0x30002000}, // Exactly on boundary
+		{0x30001234, 0x2000, 0x30002000},
+		{0x30001000, 0x2000, 0x30002000},
+		{0x30001fff, 0x2000, 0x30002000},
+	}
+
+	for _, tc := range testCases {
+		result := Rnd(tc.input, tc.quantum)
+		if result != tc.expected {
+			t.Errorf("Rnd(0x%x, 0x%x) = 0x%x, expected 0x%x",
+				tc.input, tc.quantum, result, tc.expected)
+		}
+	}
+}
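
The cases pin down Rnd's contract: round the input up to the next multiple of the quantum, leaving already-aligned values untouched. A minimal implementation consistent with these cases (a sketch assuming non-negative input and a positive quantum; the linker's actual Rnd may handle edge cases differently):

	// Rnd rounds v up to the next multiple of r.
	func Rnd(v, r int64) int64 {
		if rem := v % r; rem != 0 {
			return v + r - rem
		}
		return v
	}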
@@ -105,6 +105,7 @@ var (
 	FlagStrictDups = flag.Int("strictdups", 0, "sanity check duplicate symbol contents during object file reading (1=warn 2=err).")
 	FlagRound      = flag.Int64("R", -1, "set address rounding `quantum`")
 	FlagTextAddr   = flag.Int64("T", -1, "set the start address of text symbols")
+	FlagDataAddr   = flag.Int64("D", -1, "set the start address of data symbols")
 	FlagFuncAlign  = flag.Int("funcalign", 0, "set function align to `N` bytes")
 	flagEntrySymbol  = flag.String("E", "", "set `entry` symbol name")
 	flagPruneWeakMap = flag.Bool("pruneweakmap", true, "prune weak mapinit refs")
@@ -317,6 +318,10 @@ func Main(arch *sys.Arch, theArch Arch) {
 	bench.Start("Archinit")
 	thearch.Archinit(ctxt)

+	if *FlagDataAddr != -1 && *FlagDataAddr%*FlagRound != 0 {
+		Exitf("invalid -D value 0x%x: not aligned to rounding quantum 0x%x", *FlagDataAddr, *FlagRound)
+	}
+
 	if ctxt.linkShared && !ctxt.IsELF {
 		Exitf("-linkshared can only be used on elf systems")
 	}

@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.

-//go:build (freebsd && go1.21) || (netbsd && go1.25)
+//go:build freebsd || (netbsd && go1.25)

 package ld

@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.

-//go:build !darwin && !(freebsd && go1.21) && !linux && !(netbsd && go1.25)
+//go:build !darwin && !freebsd && !linux && !(netbsd && go1.25)

 package ld

@@ -153,10 +153,6 @@ func makeComputePprofFunc(state trace.GoState, trackReason func(string) bool) co
 			if ev.Kind() != trace.EventStateTransition {
 				continue
 			}
-			stack := ev.Stack()
-			if stack == trace.NoStack {
-				continue
-			}

 			// The state transition has to apply to a goroutine.
 			st := ev.StateTransition()

src/cmd/trace/pprof_test.go (new file, 103 lines)

@@ -0,0 +1,103 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"bytes"
	"net/http"
	"os"
	"runtime/trace"
	"strings"
	"testing"
	"testing/synctest"
	"time"

	"internal/trace/testtrace"
)

// Regression test for go.dev/issue/74850.
func TestSyscallProfile74850(t *testing.T) {
	testtrace.MustHaveSyscallEvents(t)

	var buf bytes.Buffer
	err := trace.Start(&buf)
	if err != nil {
		t.Fatalf("start tracing: %v", err)
	}

	synctest.Test(t, func(t *testing.T) {
		go hidden1(t)
		go hidden2(t)
		go visible(t)
		synctest.Wait()
		time.Sleep(1 * time.Millisecond)
		synctest.Wait()
	})
	trace.Stop()

	if t.Failed() {
		return
	}

	parsed, err := parseTrace(&buf, int64(buf.Len()))
	if err != nil {
		t.Fatalf("parsing trace: %v", err)
	}

	records, err := pprofByGoroutine(computePprofSyscall(), parsed)(&http.Request{})
	if err != nil {
		t.Fatalf("failed to generate pprof: %v\n", err)
	}

	for _, r := range records {
		t.Logf("Record: n=%d, total=%v", r.Count, r.Time)
		for _, f := range r.Stack {
			t.Logf("\t%s", f.Func)
			t.Logf("\t\t%s:%d @ 0x%x", f.File, f.Line, f.PC)
		}
	}
	if len(records) == 0 {
		t.Error("empty profile")
	}

	// Make sure we see the right frames.
	wantSymbols := []string{"cmd/trace.visible", "cmd/trace.hidden1", "cmd/trace.hidden2"}
	haveSymbols := make([]bool, len(wantSymbols))
	for _, r := range records {
		for _, f := range r.Stack {
			for i, s := range wantSymbols {
				if strings.Contains(f.Func, s) {
					haveSymbols[i] = true
				}
			}
		}
	}
	for i, have := range haveSymbols {
		if !have {
			t.Errorf("expected %s in syscall profile", wantSymbols[i])
		}
	}
}

func stat(t *testing.T) {
	_, err := os.Stat(".")
	if err != nil {
		t.Errorf("os.Stat: %v", err)
	}
}

func hidden1(t *testing.T) {
	stat(t)
}

func hidden2(t *testing.T) {
	stat(t)
	stat(t)
}

func visible(t *testing.T) {
	stat(t)
	time.Sleep(1 * time.Millisecond)
}

src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/fix.go (generated, vendored, new file, 284 lines)

@@ -0,0 +1,284 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package analysisflags

// This file defines the -fix logic common to unitchecker and
// {single,multi}checker.

import (
	"fmt"
	"go/format"
	"go/token"
	"log"
	"maps"
	"os"
	"sort"

	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/internal/analysisinternal"
	"golang.org/x/tools/internal/diff"
)

// FixAction abstracts a checker action (running one analyzer on one
// package) for the purposes of applying its diagnostics' fixes.
type FixAction struct {
	Name         string // e.g. "analyzer@package"
	FileSet      *token.FileSet
	ReadFileFunc analysisinternal.ReadFileFunc
	Diagnostics  []analysis.Diagnostic
}

// ApplyFixes attempts to apply the first suggested fix associated
// with each diagnostic reported by the specified actions.
// All fixes must have been validated by [analysisinternal.ValidateFixes].
//
// Each fix is treated as an independent change; fixes are merged in
// an arbitrary deterministic order as if by a three-way diff tool
// such as the UNIX diff3 command or 'git merge'. Any fix that cannot be
// cleanly merged is discarded, in which case the final summary tells
// the user to re-run the tool.
// TODO(adonovan): make the checker tool re-run the analysis itself.
//
// When the same file is analyzed as a member of both a primary
// package "p" and a test-augmented package "p [p.test]", there may be
// duplicate diagnostics and fixes. One set of fixes will be applied
// and the other will be discarded; but re-running the tool may then
// show zero fixes, which may cause the confused user to wonder what
// happened to the other ones.
// TODO(adonovan): consider pre-filtering completely identical fixes.
//
// A common reason for overlapping fixes is duplicate additions of the
// same import. The merge algorithm may often cleanly resolve such
// fixes, coalescing identical edits, but the merge may sometimes be
// confused by nearby changes.
//
// Even when merging succeeds, there is no guarantee that the
// composition of the two fixes is semantically correct. Coalescing
// identical edits is appropriate for imports, but not for, say,
// increments to a counter variable; the correct resolution in that
// case might be to increment it twice. Or consider two fixes that
// each delete the penultimate reference to an import or local
// variable: each fix is sound individually, and they may be textually
// distant from each other, but when both are applied, the program is
// no longer valid because it has an unreferenced import or local
// variable.
// TODO(adonovan): investigate replacing the final "gofmt" step with a
// formatter that applies the unused-import deletion logic of
// "goimports".
//
// Merging depends on both the order of fixes and the order of edits
// within them. For example, if three fixes add import "a" twice and
// import "b" once, the two imports of "a" may be combined if they
// appear in order [a, a, b], or not if they appear as [a, b, a].
// TODO(adonovan): investigate an algebraic approach to imports;
// that is, for fixes to Go source files, convert changes within the
// import(...) portion of the file into semantic edits, compose those
// edits algebraically, then convert the result back to edits.
//
// applyFixes returns success if all fixes are valid, could be cleanly
// merged, and the corresponding files were successfully updated.
//
// If the -diff flag was set, instead of updating the files it displays the final
// patch composed of all the cleanly merged fixes.
//
// TODO(adonovan): handle file-system level aliases such as symbolic
// links using robustio.FileID.
func ApplyFixes(actions []FixAction, verbose bool) error {
	// Select fixes to apply.
	//
	// If there are several for a given Diagnostic, choose the first.
	// Preserve the order of iteration, for determinism.
	type fixact struct {
		fix *analysis.SuggestedFix
		act FixAction
	}
	var fixes []*fixact
	for _, act := range actions {
		for _, diag := range act.Diagnostics {
			for i := range diag.SuggestedFixes {
				fix := &diag.SuggestedFixes[i]
				if i == 0 {
					fixes = append(fixes, &fixact{fix, act})
				} else {
					// TODO(adonovan): abstract the logger.
					log.Printf("%s: ignoring alternative fix %q", act.Name, fix.Message)
				}
			}
		}
	}

	// Read file content on demand, from the virtual
	// file system that fed the analyzer (see #62292).
	//
	// This cache assumes that all successful reads for the same
	// file name return the same content.
	// (It is tempting to group fixes by package and do the
	// merge/apply/format steps one package at a time, but
	// packages are not disjoint, due to test variants, so this
	// would not really address the issue.)
	baselineContent := make(map[string][]byte)
	getBaseline := func(readFile analysisinternal.ReadFileFunc, filename string) ([]byte, error) {
		content, ok := baselineContent[filename]
		if !ok {
			var err error
			content, err = readFile(filename)
			if err != nil {
				return nil, err
			}
			baselineContent[filename] = content
		}
		return content, nil
	}

	// Apply each fix, updating the current state
	// only if the entire fix can be cleanly merged.
	accumulatedEdits := make(map[string][]diff.Edit)
	goodFixes := 0
fixloop:
	for _, fixact := range fixes {
		// Convert analysis.TextEdits to diff.Edits, grouped by file.
		// Precondition: a prior call to validateFix succeeded.
		fileEdits := make(map[string][]diff.Edit)
		for _, edit := range fixact.fix.TextEdits {
			file := fixact.act.FileSet.File(edit.Pos)

			baseline, err := getBaseline(fixact.act.ReadFileFunc, file.Name())
			if err != nil {
				log.Printf("skipping fix to file %s: %v", file.Name(), err)
				continue fixloop
			}

			// We choose to treat size mismatch as a serious error,
			// as it indicates a concurrent write to at least one file,
			// and possibly others (consider a git checkout, for example).
			if file.Size() != len(baseline) {
				return fmt.Errorf("concurrent file modification detected in file %s (size changed from %d -> %d bytes); aborting fix",
					file.Name(), file.Size(), len(baseline))
			}

			fileEdits[file.Name()] = append(fileEdits[file.Name()], diff.Edit{
				Start: file.Offset(edit.Pos),
				End:   file.Offset(edit.End),
				New:   string(edit.NewText),
			})
		}

		// Apply each set of edits by merging atop
		// the previous accumulated state.
		after := make(map[string][]diff.Edit)
		for file, edits := range fileEdits {
			if prev := accumulatedEdits[file]; len(prev) > 0 {
				merged, ok := diff.Merge(prev, edits)
				if !ok {
					// debugging
					if false {
						log.Printf("%s: fix %s conflicts", fixact.act.Name, fixact.fix.Message)
					}
					continue fixloop // conflict
				}
				edits = merged
			}
			after[file] = edits
		}

		// The entire fix applied cleanly; commit it.
		goodFixes++
		maps.Copy(accumulatedEdits, after)
		// debugging
		if false {
			log.Printf("%s: fix %s applied", fixact.act.Name, fixact.fix.Message)
		}
	}
	badFixes := len(fixes) - goodFixes

	// Show diff or update files to final state.
	var files []string
	for file := range accumulatedEdits {
		files = append(files, file)
	}
	sort.Strings(files) // for deterministic -diff
	var filesUpdated, totalFiles int
	for _, file := range files {
		edits := accumulatedEdits[file]
		if len(edits) == 0 {
			continue // the diffs annihilated (a miracle?)
		}

		// Apply accumulated fixes.
		baseline := baselineContent[file] // (cache hit)
		final, err := diff.ApplyBytes(baseline, edits)
		if err != nil {
			log.Fatalf("internal error in diff.ApplyBytes: %v", err)
		}

		// Attempt to format each file.
		if formatted, err := format.Source(final); err == nil {
			final = formatted
		}

		if diffFlag {
			// Since we formatted the file, we need to recompute the diff.
			unified := diff.Unified(file+" (old)", file+" (new)", string(baseline), string(final))
			// TODO(adonovan): abstract the I/O.
			os.Stdout.WriteString(unified)

		} else {
			// write
			totalFiles++
			// TODO(adonovan): abstract the I/O.
			if err := os.WriteFile(file, final, 0644); err != nil {
				log.Println(err)
				continue
			}
			filesUpdated++
		}
	}

	// TODO(adonovan): consider returning a structured result that
	// maps each SuggestedFix to its status:
	//   - invalid
	//   - secondary, not selected
	//   - applied
	//   - had conflicts.
	// and a mapping from each affected file to:
	//   - its final/original content pair, and
	//   - whether formatting was successful.
	// Then file writes and the UI can be applied by the caller
	// in whatever form they like.

	// If victory was incomplete, report an error that indicates partial progress.
	//
	// badFixes > 0 indicates that we decided not to attempt some
	// fixes due to conflicts or failure to read the source; still
	// it's a relatively benign situation since the user can
	// re-run the tool, and we may still make progress.
	//
	// filesUpdated < totalFiles indicates that some file updates
	// failed. This should be rare, but is a serious error as it
	// may apply half a fix, or leave the files in a bad state.
	//
	// These numbers are potentially misleading:
	// The denominator includes duplicate conflicting fixes due to
	// common files in packages "p" and "p [p.test]", which may
	// have been fixed and won't appear in the re-run.
	// TODO(adonovan): eliminate identical fixes as an initial
	// filtering step.
	//
	// TODO(adonovan): should we log that n files were updated in case of total victory?
	if badFixes > 0 || filesUpdated < totalFiles {
		if diffFlag {
			return fmt.Errorf("%d of %d fixes skipped (e.g. due to conflicts)", badFixes, len(fixes))
		} else {
			return fmt.Errorf("applied %d of %d fixes; %d files updated. (Re-run the command to apply more.)",
				goodFixes, len(fixes), filesUpdated)
		}
	}

	if verbose {
		log.Printf("applied %d fixes, updated %d files", len(fixes), filesUpdated)
	}

	return nil
}

src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go (generated, vendored)

@@ -2,8 +2,9 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.

-// Package analysisflags defines helpers for processing flags of
-// analysis driver tools.
+// Package analysisflags defines helpers for processing flags (-help,
+// -json, -fix, -diff, etc) common to unitchecker and
+// {single,multi}checker. It is not intended for broader use.
 package analysisflags

 import (

@@ -24,8 +25,10 @@ import (

 // flags common to all {single,multi,unit}checkers.
 var (
-	JSON    = false // -json
-	Context = -1    // -c=N: if N>0, display offending line plus N lines of context
+	JSON     = false // -json
+	Context  = -1    // -c=N: if N>0, display offending line plus N lines of context
+	Fix      bool    // -fix
+	diffFlag bool    // -diff (changes [ApplyFixes] behavior)
 )

 // Parse creates a flag for each of the analyzer's flags,

@@ -74,6 +77,8 @@ func Parse(analyzers []*analysis.Analyzer, multi bool) []*analysis.Analyzer {
 	// flags common to all checkers
 	flag.BoolVar(&JSON, "json", JSON, "emit JSON output")
 	flag.IntVar(&Context, "c", Context, `display offending line with this many lines of context`)
+	flag.BoolVar(&Fix, "fix", false, "apply all suggested fixes")
+	flag.BoolVar(&diffFlag, "diff", false, "with -fix, don't update the files, but print a unified diff")

 	// Add shims for legacy vet flags to enable existing
 	// scripts that run vet to continue to work.


src/cmd/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go (generated, vendored)

@@ -85,6 +85,18 @@ type Config struct {
 //	-V=full    describe executable for build caching
 //	foo.cfg    perform separate modular analysis on the single
 //	           unit described by a JSON config file foo.cfg.
+//
+// Also, subject to approval of proposal #71859:
+//
+//	-fix       don't print each diagnostic, apply its first fix
+//	-diff      don't apply a fix, print the diff (requires -fix)
+//
+// Additionally, the environment variable GOVET has the value "vet" or
+// "fix" depending on whether the command is being invoked by "go vet",
+// to report diagnostics, or "go fix", to apply fixes. This is
+// necessary so that callers of Main can select their analyzer suite
+// before flag parsing. (Vet analyzers must report real code problems,
+// whereas Fix analyzers may fix non-problems such as style issues.)
 func Main(analyzers ...*analysis.Analyzer) {
 	progname := filepath.Base(os.Args[0])
 	log.SetFlags(0)
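
For context, a driver built on this API is just a call to Main with an analyzer list; a minimal sketch (the printf analyzer is our example choice, not part of this diff):

	package main

	import (
		"golang.org/x/tools/go/analysis/passes/printf"
		"golang.org/x/tools/go/analysis/unitchecker"
	)

	func main() {
		unitchecker.Main(printf.Analyzer) // typically invoked via "go vet -vettool=..."
	}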
@@ -136,35 +148,14 @@ func Run(configFile string, analyzers []*analysis.Analyzer) {
 		log.Fatal(err)
 	}

+	code := 0
+
 	// In VetxOnly mode, the analysis is run only for facts.
 	if !cfg.VetxOnly {
-		if analysisflags.JSON {
-			// JSON output
-			tree := make(analysisflags.JSONTree)
-			for _, res := range results {
-				tree.Add(fset, cfg.ID, res.a.Name, res.diagnostics, res.err)
-			}
-			tree.Print(os.Stdout)
-		} else {
-			// plain text
-			exit := 0
-			for _, res := range results {
-				if res.err != nil {
-					log.Println(res.err)
-					exit = 1
-				}
-			}
-			for _, res := range results {
-				for _, diag := range res.diagnostics {
-					analysisflags.PrintPlain(os.Stderr, fset, analysisflags.Context, diag)
-					exit = 1
-				}
-			}
-			os.Exit(exit)
-		}
+		code = processResults(fset, cfg.ID, results)
 	}

-	os.Exit(0)
+	os.Exit(code)
 }

 func readConfig(filename string) (*Config, error) {
@@ -185,6 +176,63 @@ func readConfig(filename string) (*Config, error) {
 	return cfg, nil
 }

+func processResults(fset *token.FileSet, id string, results []result) (exit int) {
+	if analysisflags.Fix {
+		// Don't print the diagnostics,
+		// but apply all fixes from the root actions.
+
+		// Convert results to form needed by ApplyFixes.
+		fixActions := make([]analysisflags.FixAction, len(results))
+		for i, res := range results {
+			fixActions[i] = analysisflags.FixAction{
+				Name:         res.a.Name,
+				FileSet:      fset,
+				ReadFileFunc: os.ReadFile,
+				Diagnostics:  res.diagnostics,
+			}
+		}
+		if err := analysisflags.ApplyFixes(fixActions, false); err != nil {
+			// Fail when applying fixes failed.
+			log.Print(err)
+			exit = 1
+		}
+
+		// Don't proceed to print text/JSON,
+		// and don't report an error
+		// just because there were diagnostics.
+		return
+	}
+
+	// Keep consistent with analogous logic in
+	// printDiagnostics in ../internal/checker/checker.go.
+
+	if analysisflags.JSON {
+		// JSON output
+		tree := make(analysisflags.JSONTree)
+		for _, res := range results {
+			tree.Add(fset, id, res.a.Name, res.diagnostics, res.err)
+		}
+		tree.Print(os.Stdout) // ignore error
+
+	} else {
+		// plain text
+		for _, res := range results {
+			if res.err != nil {
+				log.Println(res.err)
+				exit = 1
+			}
+		}
+		for _, res := range results {
+			for _, diag := range res.diagnostics {
+				analysisflags.PrintPlain(os.Stderr, fset, analysisflags.Context, diag)
+				exit = 1
+			}
+		}
+	}
+
+	return
+}
+
 type factImporter = func(pkgPath string) ([]byte, error)

 // These four hook variables are a proof of concept of a future


src/cmd/vendor/golang.org/x/tools/internal/diff/diff.go (generated, vendored, new file, 177 lines)

@@ -0,0 +1,177 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package diff computes differences between text files or strings.
package diff

import (
	"fmt"
	"slices"
	"sort"
	"strings"
)

// An Edit describes the replacement of a portion of a text file.
type Edit struct {
	Start, End int    // byte offsets of the region to replace
	New        string // the replacement
}

func (e Edit) String() string {
	return fmt.Sprintf("{Start:%d,End:%d,New:%q}", e.Start, e.End, e.New)
}

// Apply applies a sequence of edits to the src buffer and returns the
// result. Edits are applied in order of start offset; edits with the
// same start offset are applied in the order they were provided.
//
// Apply returns an error if any edit is out of bounds,
// or if any pair of edits is overlapping.
func Apply(src string, edits []Edit) (string, error) {
	edits, size, err := validate(src, edits)
	if err != nil {
		return "", err
	}

	// Apply edits.
	out := make([]byte, 0, size)
	lastEnd := 0
	for _, edit := range edits {
		if lastEnd < edit.Start {
			out = append(out, src[lastEnd:edit.Start]...)
		}
		out = append(out, edit.New...)
		lastEnd = edit.End
	}
	out = append(out, src[lastEnd:]...)

	if len(out) != size {
		panic("wrong size")
	}

	return string(out), nil
}

// ApplyBytes is like Apply, but it accepts a byte slice.
// The result is always a new array.
func ApplyBytes(src []byte, edits []Edit) ([]byte, error) {
	res, err := Apply(string(src), edits)
	return []byte(res), err
}
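
A usage sketch for Apply (illustrative only: the package is internal to x/tools, so it cannot be imported from outside that module):

	package main

	import (
		"fmt"

		"golang.org/x/tools/internal/diff"
	)

	func main() {
		src := "hello, world"
		edits := []diff.Edit{
			{Start: 0, End: 5, New: "goodbye"}, // replace "hello"
			{Start: 12, End: 12, New: "!"},     // insert at EOF
		}
		out, err := diff.Apply(src, edits)
		if err != nil {
			panic(err)
		}
		fmt.Println(out) // goodbye, world!
	}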

// validate checks that edits are consistent with src,
// and returns the size of the patched output.
// It may return a different slice.
func validate(src string, edits []Edit) ([]Edit, int, error) {
	if !sort.IsSorted(editsSort(edits)) {
		edits = slices.Clone(edits)
		SortEdits(edits)
	}

	// Check validity of edits and compute final size.
	size := len(src)
	lastEnd := 0
	for _, edit := range edits {
		if !(0 <= edit.Start && edit.Start <= edit.End && edit.End <= len(src)) {
			return nil, 0, fmt.Errorf("diff has out-of-bounds edits")
		}
		if edit.Start < lastEnd {
			return nil, 0, fmt.Errorf("diff has overlapping edits")
		}
		size += len(edit.New) + edit.Start - edit.End
		lastEnd = edit.End
	}

	return edits, size, nil
}

// SortEdits orders a slice of Edits by (start, end) offset.
// This ordering puts insertions (end = start) before deletions
// (end > start) at the same point, but uses a stable sort to preserve
// the order of multiple insertions at the same point.
// (Apply detects multiple deletions at the same point as an error.)
func SortEdits(edits []Edit) {
	sort.Stable(editsSort(edits))
}

type editsSort []Edit

func (a editsSort) Len() int { return len(a) }
func (a editsSort) Less(i, j int) bool {
	if cmp := a[i].Start - a[j].Start; cmp != 0 {
		return cmp < 0
	}
	return a[i].End < a[j].End
}
func (a editsSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
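
Continuing the sketch above, the insertion-before-deletion rule in SortEdits is easy to see concretely: with two edits at the same start offset, the insertion (End == Start) sorts ahead of the deletion because it has the smaller End.

	edits := []diff.Edit{
		{Start: 5, End: 9, New: ""},  // deletion at offset 5
		{Start: 5, End: 5, New: "x"}, // insertion at offset 5
	}
	diff.SortEdits(edits)
	fmt.Println(edits[0]) // {Start:5,End:5,New:"x"}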

// lineEdits expands and merges a sequence of edits so that each
// resulting edit replaces one or more complete lines.
// See ApplyEdits for preconditions.
func lineEdits(src string, edits []Edit) ([]Edit, error) {
	edits, _, err := validate(src, edits)
	if err != nil {
		return nil, err
	}

	// Do all deletions begin and end at the start of a line,
	// and all insertions end with a newline?
	// (This is merely a fast path.)
	for _, edit := range edits {
		if edit.Start >= len(src) || // insertion at EOF
			edit.Start > 0 && src[edit.Start-1] != '\n' || // not at line start
			edit.End > 0 && src[edit.End-1] != '\n' || // not at line start
			edit.New != "" && edit.New[len(edit.New)-1] != '\n' { // partial insert
			goto expand // slow path
		}
	}
	return edits, nil // aligned

expand:
	if len(edits) == 0 {
		return edits, nil // no edits (unreachable due to fast path)
	}
	expanded := make([]Edit, 0, len(edits)) // a guess
	prev := edits[0]
	// TODO(adonovan): opt: start from the first misaligned edit.
	// TODO(adonovan): opt: avoid quadratic cost of string += string.
	for _, edit := range edits[1:] {
		between := src[prev.End:edit.Start]
		if !strings.Contains(between, "\n") {
			// overlapping lines: combine with previous edit.
			prev.New += between + edit.New
			prev.End = edit.End
		} else {
			// non-overlapping lines: flush previous edit.
			expanded = append(expanded, expandEdit(prev, src))
			prev = edit
		}
	}
	return append(expanded, expandEdit(prev, src)), nil // flush final edit
}

// expandEdit returns edit expanded to complete whole lines.
func expandEdit(edit Edit, src string) Edit {
	// Expand start left to start of line.
	// (delta is the zero-based column number of start.)
	start := edit.Start
	if delta := start - 1 - strings.LastIndex(src[:start], "\n"); delta > 0 {
		edit.Start -= delta
		edit.New = src[start-delta:start] + edit.New
	}

	// Expand end right to end of line.
	end := edit.End
	if end > 0 && src[end-1] != '\n' ||
		edit.New != "" && edit.New[len(edit.New)-1] != '\n' {
		if nl := strings.IndexByte(src[end:], '\n'); nl < 0 {
			edit.End = len(src) // extend to EOF
		} else {
			edit.End = end + nl + 1 // extend beyond \n
		}
	}
	edit.New += src[end:edit.End]

	return edit
}

src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/common.go (generated, vendored, new file, 179 lines)

@@ -0,0 +1,179 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lcs

import (
	"log"
	"sort"
)

// lcs is a longest common sequence
type lcs []diag

// A diag is a piece of the edit graph where A[X+i] == B[Y+i], for 0<=i<Len.
// All computed diagonals are parts of a longest common subsequence.
type diag struct {
	X, Y int
	Len  int
}
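
For intuition, a hypothetical diagonal (our example, not from the file): comparing A = "xabcy" with B = "zzabcw", the common run "abc" is the single diagonal

	diag{X: 1, Y: 2, Len: 3} // A[1+i] == B[2+i] == "abc"[i] for 0 <= i < 3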

// sort sorts in place, by lowest X, and if tied, inversely by Len
func (l lcs) sort() lcs {
	sort.Slice(l, func(i, j int) bool {
		if l[i].X != l[j].X {
			return l[i].X < l[j].X
		}
		return l[i].Len > l[j].Len
	})
	return l
}

// validate that the elements of the lcs do not overlap
// (can only happen when the two-sided algorithm ends early)
// expects the lcs to be sorted
func (l lcs) valid() bool {
	for i := 1; i < len(l); i++ {
		if l[i-1].X+l[i-1].Len > l[i].X {
			return false
		}
		if l[i-1].Y+l[i-1].Len > l[i].Y {
			return false
		}
	}
	return true
}

// repair overlapping lcs
// only called if two-sided stops early
func (l lcs) fix() lcs {
	// from the set of diagonals in l, find a maximal non-conflicting set
	// this problem may be NP-complete, but we use a greedy heuristic,
	// which is quadratic, but with a better data structure, could be D log D.
	// independent is not enough: {0,3,1} and {3,0,2} can't both occur in an lcs
	// which has to have monotone x and y
	if len(l) == 0 {
		return nil
	}
	sort.Slice(l, func(i, j int) bool { return l[i].Len > l[j].Len })
	tmp := make(lcs, 0, len(l))
	tmp = append(tmp, l[0])
	for i := 1; i < len(l); i++ {
		var dir direction
		nxt := l[i]
		for _, in := range tmp {
			if dir, nxt = overlap(in, nxt); dir == empty || dir == bad {
				break
			}
		}
		if nxt.Len > 0 && dir != bad {
			tmp = append(tmp, nxt)
		}
	}
	tmp.sort()
	if false && !tmp.valid() { // debug checking
		log.Fatalf("here %d", len(tmp))
	}
	return tmp
}

type direction int

const (
	empty    direction = iota // diag is empty (so not in lcs)
	leftdown                  // proposed diag is acceptably to the left and below
	rightup                   // proposed diag is acceptably to the right and above
	bad                       // proposed diag is inconsistent with the lcs so far
)

// overlap trims the proposed diag prop so it doesn't overlap with
// the existing diag that has already been added to the lcs.
func overlap(exist, prop diag) (direction, diag) {
	if prop.X <= exist.X && exist.X < prop.X+prop.Len {
		// remove the end of prop where it overlaps with the X end of exist
		delta := prop.X + prop.Len - exist.X
		prop.Len -= delta
		if prop.Len <= 0 {
			return empty, prop
		}
	}
	if exist.X <= prop.X && prop.X < exist.X+exist.Len {
		// remove the beginning of prop where it overlaps with exist
		delta := exist.X + exist.Len - prop.X
		prop.Len -= delta
		if prop.Len <= 0 {
			return empty, prop
		}
		prop.X += delta
		prop.Y += delta
	}
	if prop.Y <= exist.Y && exist.Y < prop.Y+prop.Len {
		// remove the end of prop that overlaps (in Y) with exist
		delta := prop.Y + prop.Len - exist.Y
		prop.Len -= delta
		if prop.Len <= 0 {
			return empty, prop
		}
	}
	if exist.Y <= prop.Y && prop.Y < exist.Y+exist.Len {
		// remove the beginning of prop that overlaps with exist
		delta := exist.Y + exist.Len - prop.Y
		prop.Len -= delta
		if prop.Len <= 0 {
			return empty, prop
		}
		prop.X += delta // no test reaches this code
		prop.Y += delta
	}
	if prop.X+prop.Len <= exist.X && prop.Y+prop.Len <= exist.Y {
		return leftdown, prop
	}
	if exist.X+exist.Len <= prop.X && exist.Y+exist.Len <= prop.Y {
		return rightup, prop
	}
	// prop can't be in an lcs that contains exist
	return bad, prop
}
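
A worked trim (our numbers, not from the file): with exist = diag{X: 0, Y: 0, Len: 3} already committed and a proposal prop = diag{X: 2, Y: 2, Len: 4}, the second branch fires (exist.X <= prop.X < exist.X+exist.Len), so delta = 1 and prop is trimmed to diag{X: 3, Y: 3, Len: 3}; the final checks then return rightup, since the trimmed run now starts at or after the end of exist in both X and Y.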

// manipulating Diag and lcs

// prepend a diagonal (x,y)-(x+1,y+1) segment either to an empty lcs
// or to its first Diag. prepend is only called to extend diagonals
// in the backward direction.
func (lcs lcs) prepend(x, y int) lcs {
	if len(lcs) > 0 {
		d := &lcs[0]
		if int(d.X) == x+1 && int(d.Y) == y+1 {
			// extend the diagonal down and to the left
			d.X, d.Y = int(x), int(y)
			d.Len++
			return lcs
		}
	}

	r := diag{X: int(x), Y: int(y), Len: 1}
	lcs = append([]diag{r}, lcs...)
	return lcs
}

// append appends a diagonal, or extends the existing one,
// by adding the edge (x,y)-(x+1,y+1). append is only called
// to extend diagonals in the forward direction.
func (lcs lcs) append(x, y int) lcs {
	if len(lcs) > 0 {
		last := &lcs[len(lcs)-1]
		// Expand last element if adjoining.
		if last.X+last.Len == x && last.Y+last.Len == y {
			last.Len++
			return lcs
		}
	}

	return append(lcs, diag{X: x, Y: y, Len: 1})
}

// enforce constraint on d, k
func ok(d, k int) bool {
	return d >= 0 && -d <= k && k <= d
}
Some files were not shown because too many files have changed in this diff.