[dev.simd] all: merge master (5945fc0) into dev.simd

Merge List:

+ 2025-12-03 5945fc02fc doc/next: delete
+ 2025-12-03 dcc5fe0c62 api: promote next to go1.26
+ 2025-12-03 7991da1161 crypto/hpke: remove unused hybridKEM field
+ 2025-12-03 2729e87aa5 doc/next: pluralize 'result'
+ 2025-12-03 6e72f526cd doc/next/6-stdlib/99-minor/go/ast/76031.md: add BasicLit caveat
+ 2025-12-03 fa30b68767 go/{ast,doc}: update BasicLit.ValueEnd as well as ValuePos
+ 2025-12-03 32a9804c7b cmd/link: don't update offset of existing ELF section name
+ 2025-12-02 509ddf3868 cmd/compile: ensure bloop only kept alive addressable nodes
+ 2025-12-02 7cab1b1b26 doc: pre-announce removal of gotypesalias and asynctimerchan GODEBUG flags
+ 2025-12-02 1a64db3a4b spec: remove restriction on channel element types for close built-in (bug fix)
+ 2025-12-02 2e06fa6b68 doc/next: release note for scheduler metrics
+ 2025-12-02 77c795011b doc/next: document cgo call overhead improvement
+ 2025-12-02 6e4abe8cef doc: mention stack allocation of slices
+ 2025-12-02 88c24de8b5 doc/next: add section for Green Tea
+ 2025-12-02 043b9de658 net: parse addresses without separators in ParseMac
+ 2025-12-02 e432b4f3a1 cmd/compile: more generated equality function tests
+ 2025-12-02 c1acdcb345 crypto/x509: prevent HostnameError.Error() from consuming excessive resource
+ 2025-12-02 8ae5d408ed spec: more precise prose for built-in function new
+ 2025-12-02 c5c05a0e43 cmd/go: add test checking version with experiment is valid
+ 2025-12-01 f22d37d574 runtime/internal/testprog: log initial SchedMetrics GOMAXPROCS
+ 2025-12-01 8b5db48db1 net/http: deflake TestClientConnReserveAndConsume
+ 2025-12-01 94616dad42 internal/runtime/cgroup: remove duplicate readString definition
+ 2025-12-01 67851547d8 internal/runtime/cgroup: lineReader fuzz test
+ 2025-12-01 ac3e0ae51a doc: document go tool pprof -http default change
+ 2025-12-01 42e03bbd27 debug/elf: correct case of DWARF in comment
+ 2025-12-01 18015e8c36 doc/next: clean up some Go 1.26 release notes
+ 2025-12-01 4be545115c cmd/pprof: update vendored github.com/google/pprof
+ 2025-12-01 16c0f7e152 cmd/compile: run go generate for internal/ir
+ 2025-12-01 dc913c316a all: update vendored dependencies
+ 2025-12-01 1555fad47d vendor/golang.org/x/tools: update to 1ad6f3d
+ 2025-12-01 eec1afeb28 debug/elf: make check for empty symbol section consistent for 64-bit and 32-bit binaries
+ 2025-11-28 3f94f3d4b2 test/codegen: fix shift tests on riscv64
+ 2025-11-28 2ac1f9cbc3 cmd/compile: avoid unnecessary interface conversion in bloop
+ 2025-11-28 de456450e7 runtime/secret: disable tests under memory validating modes
+ 2025-11-27 67d4a28707 fmt: document space behavior of Append
+ 2025-11-27 c079dd13c0 runtime/secret: reorganize tests to fix -buildmode=shared
+ 2025-11-27 2947cb0469 runtime/_mkmalloc: fix log.Fatal formatting directive
+ 2025-11-26 cead111a77 internal/runtime/cgroup: stricter unescapePath
+ 2025-11-26 c2af9f14b4 internal/runtime/cgroup: fix path on non-root mount point
+ 2025-11-26 6be5de4bc4 internal/runtime/cgroup: simplify escapePath in test
+ 2025-11-26 481c6df7b9 io: reduce intermediate allocations in ReadAll and have a smaller final result
+ 2025-11-26 cec4d4303f os: allow direntries to have zero inodes on Linux
+ 2025-11-26 f1bbc66a10 cmd/link: test that moduledata is in its own section
+ 2025-11-26 003f52407a cmd/link: test that findfunctab is in gopclntab section
+ 2025-11-26 21b6ab57d5 cmd/link: test that funcdata values are in gopclntab section
+ 2025-11-26 c03e25a263 cmd/link: always run current linker in tests
+ 2025-11-26 9f5cd43fe6 cmd/link: put moduledata in its own .go.module section
+ 2025-11-26 43cfd785e7 cmd/link, runtime, debug/gosym: move pclntab magic to internal/abi
+ 2025-11-26 312b2034a4 cmd/link: put runtime.findfunctab in the .gopclntab section
+ 2025-11-26 b437d5bf36 cmd/link: put funcdata symbols in .gopclntab section
+ 2025-11-26 4bc3410b6c cmd/link: build shstrtab from ELF sections
+ 2025-11-26 b0c278be40 cmd/link: use shdr as a slice rather than counting in elfhdr.Shnum
+ 2025-11-26 0ff323143d cmd/link: sort allocated ELF section headers by address
+ 2025-11-26 4879151d1d cmd/compile: introduce alias analysis and automatically free non-aliased memory after growslice
+ 2025-11-26 d8269ab0d5 cmd/link, cmd/internal/obj: fix a remote call failure issue
+ 2025-11-26 c6d64f8556 cmd/internal/obj/loong64: remove the incorrect unsigned instructions
+ 2025-11-26 c048a9a11f go/types, types2: remove InvalidTypeCycle from literals.go
+ 2025-11-26 ff2fd6327e go/types, types2: remove setDefType and most def plumbing
+ 2025-11-26 3531ac23d4 go/types, types2: replace setDefType with pending type check
+ 2025-11-26 2b8dbb35b0 crypto,testing/cryptotest: ignore random io.Reader params, add SetGlobalRandom
+ 2025-11-26 21ebed0ac0 runtime: update mkmalloc to make generated code look nicer
+ 2025-11-26 a3fb92a710 runtime/secret: implement new secret package
+ 2025-11-26 0c747b7aa7 go/build/constraint: use strings.Builder instead of for { str+=str }
+ 2025-11-26 0f6397384b go/types: relax NewSignatureType for append(slice, str...)
+ 2025-11-26 992ad55e3d crypto/tls: support crypto.MessageSigner private keys
+ 2025-11-26 3fd9cb1895 cmd/compile: fix bloop get name logic
+ 2025-11-26 3353c100bb cmd/go: remove experiment checks for compile -c
+ 2025-11-26 301d9f9b52 doc/next: document broken freebsd/riscv64 port
+ 2025-11-26 de39282332 cmd/compile, runtime: guard X15 zeroing with GOEXPERIMENT=simd
+ 2025-11-26 86bbea0cfa crypto/fips140: add WithoutEnforcement
+ 2025-11-26 e2cae9ecdf crypto/x509: add ExtKeyUsage.OID method
+ 2025-11-26 623ef28135 cmd/go: limit total compile -c backend concurrency using a pool
+ 2025-11-26 3c6bf6fbf3 cmd/compile: handle loops better during stack allocation of slices
+ 2025-11-26 efe9ad501d go/types, types2: improve printing of []*operand lists (debugging support)
+ 2025-11-26 ac3369242d runtime: merge all the linux 32 and 64 bits files into one for each
+ 2025-11-26 fb5156a098 testing: fix bloop doc
+ 2025-11-26 b194f5d24a os,internal/syscall/windows: support O_* flags in Root.OpenFile
+ 2025-11-26 e0a4dffb0c cmd/internal/obj/loong64: add {,x}vmadd series instructions support
+ 2025-11-26 c0f02c11ff cmd/internal/obj/loong64: add aliases to 32-bit arithmetic instructions
+ 2025-11-26 37ce4adcd4 cmd/compile: add tests bruteforcing limit complement
+ 2025-11-26 437d2362ce os,internal/poll: don't call IsNonblock for consoles and Stdin
+ 2025-11-26 71f8f031b2 crypto/internal/fips140/aes: optimize ctrBlocks8Asm on amd64
+ 2025-11-26 03fcb33c0e cmd/compile: add tests bruteforcing limit negation and improve limit addition
+ 2025-11-26 dda7c8253d cmd/compile,internal/bytealg: add MemEq intrinsic for runtime.memequal
+ 2025-11-26 4976606a2f cmd/go: remove final references to modfetch.Fetcher_
+ 2025-11-26 08bf23cb97 cmd/go/internal/toolchain: remove references to modfetch.Fetcher_
+ 2025-11-26 46d5e3ea0e cmd/go/internal/modget: remove references to modfetch.Fetcher_
+ 2025-11-26 a3a6c9f62a cmd/go/internal/load: remove references to modfetch.Fetcher_
+ 2025-11-26 c1ef3d5881 cmd/go/internal/modcmd: remove references to modfetch.Fetcher_
+ 2025-11-26 ab2829ec06 cmd/compile: adjust start heap size
+ 2025-11-26 54b82e944e internal/trace: support event constructor for testing
+ 2025-11-25 eb63ef9d66 runtime: panic if cleanup function closes over cleanup pointer
+ 2025-11-25 06412288cf runtime: panic on AddCleanup with self pointer
+ 2025-11-25 03f499ec46 cmd/go/internal/modfetch: remove references to Fetcher_ in test file
+ 2025-11-25 da31fd4177 cmd/go/internal/modload: replace references to modfetch.Fetcher_
+ 2025-11-25 07b10e97d6 cmd/go/internal/modcmd: inject modfetch.Fetcher_ into DownloadModule
+ 2025-11-25 e96094402d cmd/go/internal/modload: inject modfetch.Fetcher_ into commitRequirements
+ 2025-11-25 47baf48890 cmd/go/internal/modfetch: inject Fetcher_ into TidyGoSum
+ 2025-11-25 272df5f6ba crypto/internal/fips140/aes/gcm: add more GCM nonce modes
+ 2025-11-25 1768cb40b8 crypto/tls: add SecP256r1/SecP384r1MLKEM1024 hybrid post-quantum key exchanges
+ 2025-11-25 a9093067ee cmd/internal/obj/loong64: add {,X}V{ADD,SUB}W{EV,OD}.{H.B,W.H,D.W,Q.D}{,U} instructions support
+ 2025-11-25 7b904c25a2 cmd/go/internal/modfetch: move global goSum to Fetcher_
+ 2025-11-25 e7358c6cf4 cmd/go: remove fips140 dependency on global Fetcher_
+ 2025-11-25 89f6dba7e6 internal/strconv: add testbase tests
+ 2025-11-25 6954be0baa internal/strconv: delete ftoaryu
+ 2025-11-25 8d6d14f5d6 compress/flate: move big non-pointer arrays to end of compressor
+ 2025-11-25 4ca048cc32 cmd/internal/obj/riscv: document compressed instructions
+ 2025-11-25 a572d571fa path: add more examples for path.Clean
+ 2025-11-25 eec40aae45 maps: use strings.EqualFold in example
+ 2025-11-25 113eb42efc strconv: replace Ryu ftoa with Dragonbox
+ 2025-11-25 6e5cfe94b0 crypto: fix dead links and correct SHA-512 algorithm comment
+ 2025-11-25 2c7c62b972 crypto/internal/fips140/sha512: interleave scheduling with rounds for 10.3% speed-up
+ 2025-11-25 5b34354bd3 crypto/internal/fips140/sha256: interleave scheduling and rounds for 11.2% speed-up
+ 2025-11-25 1cc1337f0a internal/runtime/cgroup: allow more tests to run on all OSes
+ 2025-11-25 6e4a0d8e44 crypto/internal/fips140/bigmod: vector implementation of addMulVVWx on s390x
+ 2025-11-25 657b331ff5 net/url: fix example of Values.Encode
+ 2025-11-25 bd9222b525 crypto/sha3: reduce cSHAKE allocations
+ 2025-11-25 e3088d6eb8 crypto/hpke: expose crypto/internal/hpke
+ 2025-11-25 a5ebc6b67c crypto/ecdsa: clean up ECDSA parsing and serialization paths
+ 2025-11-25 e8fdfeb72b reflect: add iterator equivalents for NumField, NumIn, NumOut and NumMethod
+ 2025-11-25 12d437c09a crypto/x509: sub-quadratic name constraint checking
+ 2025-11-25 ed4deb157e crypto/x509: cleanup name constraint tests
+ 2025-11-25 0d2baa808c crypto/rsa: add EncryptOAEPWithOptions
+ 2025-11-25 09e377b599 internal/poll: replace t.Sub(time.Now()) with time.Until in test
+ 2025-11-25 4fb7e083a8 crypto/tls: expose HelloRetryRequest state
+ 2025-11-24 31d373534e doc: pre-announce removal of 1.23 and earlier crypto GODEBUGs
+ 2025-11-24 aa093eed83 crypto/fips140: add Version
+ 2025-11-24 1dc1505d4a cmd/go/internal/modfetch: rename State to Fetcher
+ 2025-11-24 d3e11b3f90 cmd/go/internal/modload: make State.modfetchState a pointer
+ 2025-11-24 2f7fd5714f cmd/go: add setters for critical State fields
+ 2025-11-24 6851795fb6 runtime: add GODEBUG=tracebacklabels=1 to include pprof labels in tracebacks
+ 2025-11-24 0921e1db83 net/http: add Transport.NewClientConn
+ 2025-11-24 6465818435 all: update to x/net@bff14c5256
+ 2025-11-24 1a53ce9734 context: don't return the wrong error when Cause races cancellation
+ 2025-11-24 c6f882f6c5 crypto/x509: add ExtKeyUsage.String and KeyUsage.String methods
+ 2025-11-24 97d5295f6f crypto/internal/fips140test: add ML-DSA coverage
+ 2025-11-24 62cd044a79 cmd/compile: add cases for StringLen to prove
+ 2025-11-24 f1e376f342 cmd/go/internal/auth: fix typo
+ 2025-11-24 7fbd141de5 runtime: use m.profStack in traceStack
+ 2025-11-24 0bc192368a runtime: don't write unique string to trace if it's length zero
+ 2025-11-24 d4f5650cc5 all: REVERSE MERGE dev.simd (7d65463) into master

Change-Id: I4273ac3987ae2d0bc1df0051d752d8ef6c5e9af5
Author: David Chase
Date: 2025-12-03 15:43:09 -05:00
Commit: 9ac524ab70

488 changed files with 21297 additions and 5656 deletions

api/go1.26.txt (new file)

@ -0,0 +1,185 @@
pkg bytes, method (*Buffer) Peek(int) ([]uint8, error) #73794
pkg crypto, type Decapsulator interface { Decapsulate, Encapsulator } #75300
pkg crypto, type Decapsulator interface, Decapsulate([]uint8) ([]uint8, error) #75300
pkg crypto, type Decapsulator interface, Encapsulator() Encapsulator #75300
pkg crypto, type Encapsulator interface { Bytes, Encapsulate } #75300
pkg crypto, type Encapsulator interface, Bytes() []uint8 #75300
pkg crypto, type Encapsulator interface, Encapsulate() ([]uint8, []uint8) #75300
pkg crypto/ecdh, type KeyExchanger interface { Curve, ECDH, PublicKey } #75300
pkg crypto/ecdh, type KeyExchanger interface, Curve() Curve #75300
pkg crypto/ecdh, type KeyExchanger interface, ECDH(*PublicKey) ([]uint8, error) #75300
pkg crypto/ecdh, type KeyExchanger interface, PublicKey() *PublicKey #75300
pkg crypto/ecdsa, type PrivateKey struct, D //deprecated #63963
pkg crypto/ecdsa, type PublicKey struct, X //deprecated #63963
pkg crypto/ecdsa, type PublicKey struct, Y //deprecated #63963
pkg crypto/fips140, func Enforced() bool #74630
pkg crypto/fips140, func Version() string #75301
pkg crypto/fips140, func WithoutEnforcement(func()) #74630
pkg crypto/hpke, func AES128GCM() AEAD #75300
pkg crypto/hpke, func AES256GCM() AEAD #75300
pkg crypto/hpke, func ChaCha20Poly1305() AEAD #75300
pkg crypto/hpke, func DHKEM(ecdh.Curve) KEM #75300
pkg crypto/hpke, func ExportOnly() AEAD #75300
pkg crypto/hpke, func HKDFSHA256() KDF #75300
pkg crypto/hpke, func HKDFSHA384() KDF #75300
pkg crypto/hpke, func HKDFSHA512() KDF #75300
pkg crypto/hpke, func MLKEM1024() KEM #75300
pkg crypto/hpke, func MLKEM1024P384() KEM #75300
pkg crypto/hpke, func MLKEM768() KEM #75300
pkg crypto/hpke, func MLKEM768P256() KEM #75300
pkg crypto/hpke, func MLKEM768X25519() KEM #75300
pkg crypto/hpke, func NewAEAD(uint16) (AEAD, error) #75300
pkg crypto/hpke, func NewDHKEMPrivateKey(ecdh.KeyExchanger) (PrivateKey, error) #75300
pkg crypto/hpke, func NewDHKEMPublicKey(*ecdh.PublicKey) (PublicKey, error) #75300
pkg crypto/hpke, func NewHybridPrivateKey(crypto.Decapsulator, ecdh.KeyExchanger) (PrivateKey, error) #75300
pkg crypto/hpke, func NewHybridPublicKey(crypto.Encapsulator, *ecdh.PublicKey) (PublicKey, error) #75300
pkg crypto/hpke, func NewKDF(uint16) (KDF, error) #75300
pkg crypto/hpke, func NewKEM(uint16) (KEM, error) #75300
pkg crypto/hpke, func NewMLKEMPrivateKey(crypto.Decapsulator) (PrivateKey, error) #75300
pkg crypto/hpke, func NewMLKEMPublicKey(crypto.Encapsulator) (PublicKey, error) #75300
pkg crypto/hpke, func NewRecipient([]uint8, PrivateKey, KDF, AEAD, []uint8) (*Recipient, error) #75300
pkg crypto/hpke, func NewSender(PublicKey, KDF, AEAD, []uint8) ([]uint8, *Sender, error) #75300
pkg crypto/hpke, func Open(PrivateKey, KDF, AEAD, []uint8, []uint8) ([]uint8, error) #75300
pkg crypto/hpke, func SHAKE128() KDF #75300
pkg crypto/hpke, func SHAKE256() KDF #75300
pkg crypto/hpke, func Seal(PublicKey, KDF, AEAD, []uint8, []uint8) ([]uint8, error) #75300
pkg crypto/hpke, method (*Recipient) Export(string, int) ([]uint8, error) #75300
pkg crypto/hpke, method (*Recipient) Open([]uint8, []uint8) ([]uint8, error) #75300
pkg crypto/hpke, method (*Sender) Export(string, int) ([]uint8, error) #75300
pkg crypto/hpke, method (*Sender) Seal([]uint8, []uint8) ([]uint8, error) #75300
pkg crypto/hpke, type AEAD interface, ID() uint16 #75300
pkg crypto/hpke, type AEAD interface, unexported methods #75300
pkg crypto/hpke, type KDF interface, ID() uint16 #75300
pkg crypto/hpke, type KDF interface, unexported methods #75300
pkg crypto/hpke, type KEM interface, DeriveKeyPair([]uint8) (PrivateKey, error) #75300
pkg crypto/hpke, type KEM interface, GenerateKey() (PrivateKey, error) #75300
pkg crypto/hpke, type KEM interface, ID() uint16 #75300
pkg crypto/hpke, type KEM interface, NewPrivateKey([]uint8) (PrivateKey, error) #75300
pkg crypto/hpke, type KEM interface, NewPublicKey([]uint8) (PublicKey, error) #75300
pkg crypto/hpke, type KEM interface, unexported methods #75300
pkg crypto/hpke, type PrivateKey interface, Bytes() ([]uint8, error) #75300
pkg crypto/hpke, type PrivateKey interface, KEM() KEM #75300
pkg crypto/hpke, type PrivateKey interface, PublicKey() PublicKey #75300
pkg crypto/hpke, type PrivateKey interface, unexported methods #75300
pkg crypto/hpke, type PublicKey interface, Bytes() []uint8 #75300
pkg crypto/hpke, type PublicKey interface, KEM() KEM #75300
pkg crypto/hpke, type PublicKey interface, unexported methods #75300
pkg crypto/hpke, type Recipient struct #75300
pkg crypto/hpke, type Sender struct #75300
pkg crypto/mlkem, method (*DecapsulationKey1024) Encapsulator() crypto.Encapsulator #75300
pkg crypto/mlkem, method (*DecapsulationKey768) Encapsulator() crypto.Encapsulator #75300
pkg crypto/mlkem/mlkemtest, func Encapsulate1024(*mlkem.EncapsulationKey1024, []uint8) ([]uint8, []uint8, error) #73627
pkg crypto/mlkem/mlkemtest, func Encapsulate768(*mlkem.EncapsulationKey768, []uint8) ([]uint8, []uint8, error) #73627
pkg crypto/rsa, func DecryptPKCS1v15 //deprecated #75302
pkg crypto/rsa, func DecryptPKCS1v15SessionKey //deprecated #75302
pkg crypto/rsa, func EncryptOAEPWithOptions(io.Reader, *PublicKey, []uint8, *OAEPOptions) ([]uint8, error) #65716
pkg crypto/rsa, func EncryptPKCS1v15 //deprecated #75302
pkg crypto/rsa, type PKCS1v15DecryptOptions //deprecated #75302
pkg crypto/tls, const QUICErrorEvent = 10 #75108
pkg crypto/tls, const QUICErrorEvent QUICEventKind #75108
pkg crypto/tls, const SecP256r1MLKEM768 = 4587 #71206
pkg crypto/tls, const SecP256r1MLKEM768 CurveID #71206
pkg crypto/tls, const SecP384r1MLKEM1024 = 4589 #71206
pkg crypto/tls, const SecP384r1MLKEM1024 CurveID #71206
pkg crypto/tls, type ClientHelloInfo struct, HelloRetryRequest bool #74425
pkg crypto/tls, type ConnectionState struct, HelloRetryRequest bool #74425
pkg crypto/tls, type QUICEvent struct, Err error #75108
pkg crypto/x509, func OIDFromASN1OID(asn1.ObjectIdentifier) (OID, error) #75325
pkg crypto/x509, method (ExtKeyUsage) OID() OID #75325
pkg crypto/x509, method (ExtKeyUsage) String() string #56866
pkg crypto/x509, method (KeyUsage) String() string #56866
pkg database/sql/driver, type RowsColumnScanner interface { Close, Columns, Next, ScanColumn } #67546
pkg database/sql/driver, type RowsColumnScanner interface, Close() error #67546
pkg database/sql/driver, type RowsColumnScanner interface, Columns() []string #67546
pkg database/sql/driver, type RowsColumnScanner interface, Next([]Value) error #67546
pkg database/sql/driver, type RowsColumnScanner interface, ScanColumn(interface{}, int) error #67546
pkg debug/elf, const R_LARCH_CALL36 = 110 #75562
pkg debug/elf, const R_LARCH_CALL36 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC32 = 13 #75562
pkg debug/elf, const R_LARCH_TLS_DESC32 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC64 = 14 #75562
pkg debug/elf, const R_LARCH_TLS_DESC64 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC64_HI12 = 118 #75562
pkg debug/elf, const R_LARCH_TLS_DESC64_HI12 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC64_LO20 = 117 #75562
pkg debug/elf, const R_LARCH_TLS_DESC64_LO20 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC64_PC_HI12 = 114 #75562
pkg debug/elf, const R_LARCH_TLS_DESC64_PC_HI12 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC64_PC_LO20 = 113 #75562
pkg debug/elf, const R_LARCH_TLS_DESC64_PC_LO20 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC_CALL = 120 #75562
pkg debug/elf, const R_LARCH_TLS_DESC_CALL R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC_HI20 = 115 #75562
pkg debug/elf, const R_LARCH_TLS_DESC_HI20 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC_LD = 119 #75562
pkg debug/elf, const R_LARCH_TLS_DESC_LD R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC_LO12 = 116 #75562
pkg debug/elf, const R_LARCH_TLS_DESC_LO12 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC_PCREL20_S2 = 126 #75562
pkg debug/elf, const R_LARCH_TLS_DESC_PCREL20_S2 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC_PC_HI20 = 111 #75562
pkg debug/elf, const R_LARCH_TLS_DESC_PC_HI20 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC_PC_LO12 = 112 #75562
pkg debug/elf, const R_LARCH_TLS_DESC_PC_LO12 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_GD_PCREL20_S2 = 125 #75562
pkg debug/elf, const R_LARCH_TLS_GD_PCREL20_S2 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_LD_PCREL20_S2 = 124 #75562
pkg debug/elf, const R_LARCH_TLS_LD_PCREL20_S2 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_LE_ADD_R = 122 #75562
pkg debug/elf, const R_LARCH_TLS_LE_ADD_R R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_LE_HI20_R = 121 #75562
pkg debug/elf, const R_LARCH_TLS_LE_HI20_R R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_LE_LO12_R = 123 #75562
pkg debug/elf, const R_LARCH_TLS_LE_LO12_R R_LARCH #75562
pkg errors, func AsType[$0 error](error) ($0, bool) #51945
pkg go/ast, func ParseDirective(token.Pos, string) (Directive, bool) #68021
pkg go/ast, method (*Directive) End() token.Pos #68021
pkg go/ast, method (*Directive) ParseArgs() ([]DirectiveArg, error) #68021
pkg go/ast, method (*Directive) Pos() token.Pos #68021
pkg go/ast, type BasicLit struct, ValueEnd token.Pos #76031
pkg go/ast, type Directive struct #68021
pkg go/ast, type Directive struct, Args string #68021
pkg go/ast, type Directive struct, ArgsPos token.Pos #68021
pkg go/ast, type Directive struct, Name string #68021
pkg go/ast, type Directive struct, Slash token.Pos #68021
pkg go/ast, type Directive struct, Tool string #68021
pkg go/ast, type DirectiveArg struct #68021
pkg go/ast, type DirectiveArg struct, Arg string #68021
pkg go/ast, type DirectiveArg struct, Pos token.Pos #68021
pkg go/token, method (*File) End() Pos #75849
pkg log/slog, func NewMultiHandler(...Handler) *MultiHandler #65954
pkg log/slog, method (*MultiHandler) Enabled(context.Context, Level) bool #65954
pkg log/slog, method (*MultiHandler) Handle(context.Context, Record) error #65954
pkg log/slog, method (*MultiHandler) WithAttrs([]Attr) Handler #65954
pkg log/slog, method (*MultiHandler) WithGroup(string) Handler #65954
pkg log/slog, type MultiHandler struct #65954
pkg net, method (*Dialer) DialIP(context.Context, string, netip.Addr, netip.Addr) (*IPConn, error) #49097
pkg net, method (*Dialer) DialTCP(context.Context, string, netip.AddrPort, netip.AddrPort) (*TCPConn, error) #49097
pkg net, method (*Dialer) DialUDP(context.Context, string, netip.AddrPort, netip.AddrPort) (*UDPConn, error) #49097
pkg net, method (*Dialer) DialUnix(context.Context, string, *UnixAddr, *UnixAddr) (*UnixConn, error) #49097
pkg net/http, method (*ClientConn) Available() int #75772
pkg net/http, method (*ClientConn) Close() error #75772
pkg net/http, method (*ClientConn) Err() error #75772
pkg net/http, method (*ClientConn) InFlight() int #75772
pkg net/http, method (*ClientConn) Release() #75772
pkg net/http, method (*ClientConn) Reserve() error #75772
pkg net/http, method (*ClientConn) RoundTrip(*Request) (*Response, error) #75772
pkg net/http, method (*ClientConn) SetStateHook(func(*ClientConn)) #75772
pkg net/http, method (*Transport) NewClientConn(context.Context, string, string) (*ClientConn, error) #75772
pkg net/http, type ClientConn struct #75772
pkg net/http, type HTTP2Config struct, StrictMaxConcurrentRequests bool #67813
pkg net/http/httputil, type ReverseProxy struct, Director //deprecated #73161
pkg net/netip, method (Prefix) Compare(Prefix) int #61642
pkg os, method (*Process) WithHandle(func(uintptr)) error #70352
pkg os, var ErrNoHandle error #70352
pkg reflect, method (Value) Fields() iter.Seq2[StructField, Value] #66631
pkg reflect, method (Value) Methods() iter.Seq2[Method, Value] #66631
pkg reflect, type Type interface, Fields() iter.Seq[StructField] #66631
pkg reflect, type Type interface, Ins() iter.Seq[Type] #66631
pkg reflect, type Type interface, Methods() iter.Seq[Method] #66631
pkg reflect, type Type interface, Outs() iter.Seq[Type] #66631
pkg testing, method (*B) ArtifactDir() string #71287
pkg testing, method (*F) ArtifactDir() string #71287
pkg testing, method (*T) ArtifactDir() string #71287
pkg testing, type TB interface, ArtifactDir() string #71287
pkg testing/cryptotest, func SetGlobalRandom(*testing.T, uint64) #70942


@ -1,4 +0,0 @@
pkg net, method (*Dialer) DialIP(context.Context, string, netip.Addr, netip.Addr) (*IPConn, error) #49097
pkg net, method (*Dialer) DialTCP(context.Context, string, netip.AddrPort, netip.AddrPort) (*TCPConn, error) #49097
pkg net, method (*Dialer) DialUDP(context.Context, string, netip.AddrPort, netip.AddrPort) (*UDPConn, error) #49097
pkg net, method (*Dialer) DialUnix(context.Context, string, *UnixAddr, *UnixAddr) (*UnixConn, error) #49097


@ -1 +0,0 @@
pkg errors, func AsType[$0 error](error) ($0, bool) #51945


@ -1 +0,0 @@
pkg net/netip, method (Prefix) Compare(Prefix) int #61642


@ -1,3 +0,0 @@
pkg crypto/ecdsa, type PrivateKey struct, D //deprecated #63963
pkg crypto/ecdsa, type PublicKey struct, X //deprecated #63963
pkg crypto/ecdsa, type PublicKey struct, Y //deprecated #63963


@ -1,6 +0,0 @@
pkg log/slog, func NewMultiHandler(...Handler) *MultiHandler #65954
pkg log/slog, method (*MultiHandler) Enabled(context.Context, Level) bool #65954
pkg log/slog, method (*MultiHandler) Handle(context.Context, Record) error #65954
pkg log/slog, method (*MultiHandler) WithAttrs([]Attr) Handler #65954
pkg log/slog, method (*MultiHandler) WithGroup(string) Handler #65954
pkg log/slog, type MultiHandler struct #65954


@ -1,5 +0,0 @@
pkg database/sql/driver, type RowsColumnScanner interface { Close, Columns, Next, ScanColumn } #67546
pkg database/sql/driver, type RowsColumnScanner interface, Close() error #67546
pkg database/sql/driver, type RowsColumnScanner interface, Columns() []string #67546
pkg database/sql/driver, type RowsColumnScanner interface, Next([]Value) error #67546
pkg database/sql/driver, type RowsColumnScanner interface, ScanColumn(interface{}, int) error #67546


@ -1 +0,0 @@
pkg net/http, type HTTP2Config struct, StrictMaxConcurrentRequests bool #67813


@ -1,13 +0,0 @@
pkg go/ast, func ParseDirective(token.Pos, string) (Directive, bool) #68021
pkg go/ast, method (*Directive) End() token.Pos #68021
pkg go/ast, method (*Directive) ParseArgs() ([]DirectiveArg, error) #68021
pkg go/ast, method (*Directive) Pos() token.Pos #68021
pkg go/ast, type Directive struct #68021
pkg go/ast, type Directive struct, Args string #68021
pkg go/ast, type Directive struct, ArgsPos token.Pos #68021
pkg go/ast, type Directive struct, Name string #68021
pkg go/ast, type Directive struct, Slash token.Pos #68021
pkg go/ast, type Directive struct, Tool string #68021
pkg go/ast, type DirectiveArg struct #68021
pkg go/ast, type DirectiveArg struct, Arg string #68021
pkg go/ast, type DirectiveArg struct, Pos token.Pos #68021


@ -1,2 +0,0 @@
pkg os, method (*Process) WithHandle(func(uintptr)) error #70352
pkg os, var ErrNoHandle error #70352


@ -1,4 +0,0 @@
pkg testing, method (*B) ArtifactDir() string #71287
pkg testing, method (*F) ArtifactDir() string #71287
pkg testing, method (*T) ArtifactDir() string #71287
pkg testing, type TB interface, ArtifactDir() string #71287


@ -1 +0,0 @@
pkg net/http/httputil, type ReverseProxy struct, Director //deprecated #73161


@ -1,2 +0,0 @@
pkg crypto/mlkem/mlkemtest, func Encapsulate1024(*mlkem.EncapsulationKey1024, []uint8) ([]uint8, []uint8, error) #73627
pkg crypto/mlkem/mlkemtest, func Encapsulate768(*mlkem.EncapsulationKey768, []uint8) ([]uint8, []uint8, error) #73627


@ -1 +0,0 @@
pkg bytes, method (*Buffer) Peek(int) ([]uint8, error) #73794


@ -1,3 +0,0 @@
pkg crypto/tls, const QUICErrorEvent = 10 #75108
pkg crypto/tls, const QUICErrorEvent QUICEventKind #75108
pkg crypto/tls, type QUICEvent struct, Err error #75108


@ -1,12 +0,0 @@
pkg crypto, type Decapsulator interface { Decapsulate, Encapsulator } #75300
pkg crypto, type Decapsulator interface, Decapsulate([]uint8) ([]uint8, error) #75300
pkg crypto, type Decapsulator interface, Encapsulator() Encapsulator #75300
pkg crypto, type Encapsulator interface { Bytes, Encapsulate } #75300
pkg crypto, type Encapsulator interface, Bytes() []uint8 #75300
pkg crypto, type Encapsulator interface, Encapsulate() ([]uint8, []uint8) #75300
pkg crypto/ecdh, type KeyExchanger interface { Curve, ECDH, PublicKey } #75300
pkg crypto/ecdh, type KeyExchanger interface, Curve() Curve #75300
pkg crypto/ecdh, type KeyExchanger interface, ECDH(*PublicKey) ([]uint8, error) #75300
pkg crypto/ecdh, type KeyExchanger interface, PublicKey() *PublicKey #75300
pkg crypto/mlkem, method (*DecapsulationKey1024) Encapsulator() crypto.Encapsulator #75300
pkg crypto/mlkem, method (*DecapsulationKey768) Encapsulator() crypto.Encapsulator #75300


@ -1,4 +0,0 @@
pkg crypto/rsa, func DecryptPKCS1v15 //deprecated #75302
pkg crypto/rsa, func DecryptPKCS1v15SessionKey //deprecated #75302
pkg crypto/rsa, func EncryptPKCS1v15 //deprecated #75302
pkg crypto/rsa, type PKCS1v15DecryptOptions //deprecated #75302


@ -1,38 +0,0 @@
pkg debug/elf, const R_LARCH_TLS_DESC32 = 13 #75562
pkg debug/elf, const R_LARCH_TLS_DESC32 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC64 = 14 #75562
pkg debug/elf, const R_LARCH_TLS_DESC64 R_LARCH #75562
pkg debug/elf, const R_LARCH_CALL36 = 110 #75562
pkg debug/elf, const R_LARCH_CALL36 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC_PC_HI20 = 111 #75562
pkg debug/elf, const R_LARCH_TLS_DESC_PC_HI20 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC_PC_LO12 = 112 #75562
pkg debug/elf, const R_LARCH_TLS_DESC_PC_LO12 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC64_PC_LO20 = 113 #75562
pkg debug/elf, const R_LARCH_TLS_DESC64_PC_LO20 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC64_PC_HI12 = 114 #75562
pkg debug/elf, const R_LARCH_TLS_DESC64_PC_HI12 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC_HI20 = 115 #75562
pkg debug/elf, const R_LARCH_TLS_DESC_HI20 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC_LO12 = 116 #75562
pkg debug/elf, const R_LARCH_TLS_DESC_LO12 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC64_LO20 = 117 #75562
pkg debug/elf, const R_LARCH_TLS_DESC64_LO20 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC64_HI12 = 118 #75562
pkg debug/elf, const R_LARCH_TLS_DESC64_HI12 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC_LD = 119 #75562
pkg debug/elf, const R_LARCH_TLS_DESC_LD R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC_CALL = 120 #75562
pkg debug/elf, const R_LARCH_TLS_DESC_CALL R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_LE_HI20_R = 121 #75562
pkg debug/elf, const R_LARCH_TLS_LE_HI20_R R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_LE_ADD_R = 122 #75562
pkg debug/elf, const R_LARCH_TLS_LE_ADD_R R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_LE_LO12_R = 123 #75562
pkg debug/elf, const R_LARCH_TLS_LE_LO12_R R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_LD_PCREL20_S2 = 124 #75562
pkg debug/elf, const R_LARCH_TLS_LD_PCREL20_S2 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_GD_PCREL20_S2 = 125 #75562
pkg debug/elf, const R_LARCH_TLS_GD_PCREL20_S2 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC_PCREL20_S2 = 126 #75562
pkg debug/elf, const R_LARCH_TLS_DESC_PCREL20_S2 R_LARCH #75562


@ -1 +0,0 @@
pkg go/token, method (*File) End() Pos #75849


@ -1 +0,0 @@
pkg go/ast, type BasicLit struct, ValueEnd token.Pos #76031


@ -1,6 +1,6 @@
 <!--{
 "Title": "The Go Programming Language Specification",
-"Subtitle": "Language version go1.26 (Nov 18, 2025)",
+"Subtitle": "Language version go1.26 (Dec 2, 2025)",
 "Path": "/ref/spec"
 }-->
@ -7496,7 +7496,7 @@ returns a received value along with an indication of whether the channel is clos
 <p>
 If the type of the argument to <code>close</code> is a
 <a href="#Type_parameter_declarations">type parameter</a>,
-all types in its type set must be channels with the same element type.
+all types in its type set must be channels.
 It is an error if any of those channels is a receive-only channel.
 </p>
@ -7797,37 +7797,29 @@ min(x, y, z) == min(min(x, y), z)
 The built-in function <code>new</code> creates a new, initialized
 <a href="#Variables">variable</a> and returns
 a <a href="#Pointer_types">pointer</a> to it.
-It accepts a single argument, which may be either a type or an expression.
-</p>
-<p>
-If the argument <code>expr</code> is an expression of
-type <code>T</code>, or an untyped constant expression
-whose <a href="#Constants">default type</a> is <code>T</code>,
-then <code>new(expr)</code> allocates a variable of
-type <code>T</code>, initializes it to the value
-of <code>expr</code>, and returns its address, a value of
-type <code>*T</code>.
-</p>
+It accepts a single argument, which may be either an expression or a type.
+</p>
 <p>
 If the argument is a type <code>T</code>, then <code>new(T)</code>
-allocates a variable initialized to
-the <a href="#The_zero_value">zero value</a> of type <code>T</code>.
+allocates a variable of type <code>T</code> initialized to its
+<a href="#The_zero_value">zero value</a>.
 </p>
 <p>
-For example, <code>new(123)</code> and <code>new(int)</code> each
+If the argument is an expression <code>x</code>, then <code>new(x)</code>
+allocates a variable of the type of <code>x</code> initialized to the value of <code>x</code>.
+If that value is an untyped constant, it is first implicitly <a href="#Conversions">converted</a>
+to its <a href="#Constants">default type</a>;
+if it is an untyped boolean value, it is first implicitly converted to type bool.
+The predeclared identifier <code>nil</code> cannot be used as an argument to <code>new</code>.
+</p>
+<p>
+For example, <code>new(int)</code> and <code>new(123)</code> each
 return a pointer to a new variable of type <code>int</code>.
-The value of the first variable is <code>123</code>, and the value
-of the second is <code>0</code>.
-</p>
-<pre class="grammar">
-new(T)
-</pre>
-<p>
-For instance
+The value of the first variable is <code>0</code>, and the value
+of the second is <code>123</code>. Similarly
 </p>
 <pre>
@ -7836,13 +7828,12 @@ new(S)
 </pre>
 <p>
-allocates storage for a variable of type <code>S</code>,
+allocates a variable of type <code>S</code>,
 initializes it (<code>a=0</code>, <code>b=0.0</code>),
 and returns a value of type <code>*S</code> containing the address
-of the location.
+of the variable.
 </p>
 <h3 id="Handling_panics">Handling panics</h3>
 <p> Two built-in functions, <code>panic</code> and <code>recover</code>,
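As a quick, illustrative aside (a sketch for orientation, not part of the change above): the revised prose allows `new` to take either a type or an expression, so both forms below yield a `*int`.

```go
package main

import "fmt"

func main() {
	// new with a type argument: points at the zero value.
	a := new(int) // *a == 0

	// new with an expression argument (Go 1.26): points at a variable
	// initialized to the expression's value; the untyped constant 123
	// is first converted to its default type, int.
	b := new(123) // *b == 123

	fmt.Println(*a, *b) // 0 123
}
```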


@ -168,6 +168,21 @@ allows malformed hostnames containing colons outside of a bracketed IPv6 address
 The default `urlstrictcolons=1` rejects URLs such as `http://localhost:1:2` or `http://::1/`.
 Colons are permitted as part of a bracketed IPv6 address, such as `http://[::1]/`.
+Go 1.26 enabled two additional post-quantum key exchange mechanisms:
+SecP256r1MLKEM768 and SecP384r1MLKEM1024. The default can be reverted using the
+[`tlssecpmlkem` setting](/pkg/crypto/tls/#Config.CurvePreferences).
+Go 1.26 added a new `tracebacklabels` setting that controls the inclusion of
+goroutine labels set through the the `runtime/pprof` package. Setting `tracebacklabels=1`
+includes these key/value pairs in the goroutine status header of runtime
+tracebacks and debug=2 runtime/pprof stack dumps. This format may change in the future.
+(see go.dev/issue/76349)
+Go 1.26 added a new `cryptocustomrand` setting that controls whether most crypto/...
+APIs ignore the random `io.Reader` parameter. For Go 1.26, it defaults
+to `cryptocustomrand=0`, ignoring the random parameters. Using `cryptocustomrand=1`
+reverts to the pre-Go 1.26 behavior.
 ### Go 1.25
 Go 1.25 added a new `decoratemappings` setting that controls whether the Go
@ -291,7 +306,7 @@ Go 1.23 changed the channels created by package time to be unbuffered
 and [`Timer.Reset`](/pkg/time/#Timer.Reset) method results much easier.
 The [`asynctimerchan` setting](/pkg/time/#NewTimer) disables this change.
 There are no runtime metrics for this change,
-This setting may be removed in a future release, Go 1.27 at the earliest.
+This setting will be removed in Go 1.27.
 Go 1.23 changed the mode bits reported by [`os.Lstat`](/pkg/os#Lstat) and [`os.Stat`](/pkg/os#Stat)
 for reparse points, which can be controlled with the `winsymlink` setting.
@ -328,6 +343,7 @@ any effect.
 Go 1.23 changed the default TLS cipher suites used by clients and servers when
 not explicitly configured, removing 3DES cipher suites. The default can be reverted
 using the [`tls3des` setting](/pkg/crypto/tls/#Config.CipherSuites).
+This setting will be removed in Go 1.27.
 Go 1.23 changed the behavior of [`tls.X509KeyPair`](/pkg/crypto/tls#X509KeyPair)
 and [`tls.LoadX509KeyPair`](/pkg/crypto/tls#LoadX509KeyPair) to populate the
@ -335,6 +351,7 @@ Leaf field of the returned [`tls.Certificate`](/pkg/crypto/tls#Certificate).
 This behavior is controlled by the `x509keypairleaf` setting. For Go 1.23, it
 defaults to `x509keypairleaf=1`. Previous versions default to
 `x509keypairleaf=0`.
+This setting will be removed in Go 1.27.
 Go 1.23 changed
 [`net/http.ServeContent`](/pkg/net/http#ServeContent),
@ -368,21 +385,24 @@ Whether the type checker produces `Alias` types or not is controlled by the
 [`gotypesalias` setting](/pkg/go/types#Alias).
 For Go 1.22 it defaults to `gotypesalias=0`.
 For Go 1.23, `gotypesalias=1` will become the default.
-This setting will be removed in a future release, Go 1.27 at the earliest.
+This setting will be removed in Go 1.27.
 Go 1.22 changed the default minimum TLS version supported by both servers
 and clients to TLS 1.2. The default can be reverted to TLS 1.0 using the
 [`tls10server` setting](/pkg/crypto/tls/#Config).
+This setting will be removed in Go 1.27.
 Go 1.22 changed the default TLS cipher suites used by clients and servers when
 not explicitly configured, removing the cipher suites which used RSA based key
 exchange. The default can be reverted using the [`tlsrsakex` setting](/pkg/crypto/tls/#Config).
+This setting will be removed in Go 1.27.
 Go 1.22 disabled
 [`ConnectionState.ExportKeyingMaterial`](/pkg/crypto/tls/#ConnectionState.ExportKeyingMaterial)
 when the connection supports neither TLS 1.3 nor Extended Master Secret
 (implemented in Go 1.21). It can be reenabled with the [`tlsunsafeekm`
 setting](/pkg/crypto/tls/#ConnectionState.ExportKeyingMaterial).
+This setting will be removed in Go 1.27.
 Go 1.22 changed how the runtime interacts with transparent huge pages on Linux.
 In particular, a common default Linux kernel configuration can result in
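As a hedged illustration of the `tracebacklabels` setting documented above (not part of the diff): labels are attached with `runtime/pprof`, and running the program with `GODEBUG=tracebacklabels=1` is what makes them appear in the goroutine status headers of a traceback.

```go
package main

import (
	"context"
	"runtime/pprof"
)

func main() {
	// Run with GODEBUG=tracebacklabels=1 to see the labels in the panic traceback.
	pprof.Do(context.Background(), pprof.Labels("worker", "ingest"), func(ctx context.Context) {
		panic("demonstrate labeled goroutine in traceback")
	})
}
```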


@ -1,10 +1 @@
### Minor changes to the library {#minor_library_changes}
#### go/types
The `Var.Kind` method returns an enumeration of type `VarKind` that
classifies the variable (package-level, local, receiver, parameter,
result, or struct field). See issue #70250.
Callers of `NewVar` or `NewParam` are encouraged to call `Var.SetKind`
to ensure that this attribute is set correctly in all cases.


@ -1,8 +0,0 @@
<style>
main ul li { margin: 0.5em 0; }
</style>
## DRAFT RELEASE NOTES — Introduction to Go 1.26 {#introduction}
**Go 1.26 is not yet released. These are work-in-progress release notes.
Go 1.26 is expected to be released in February 2026.**


@ -1,32 +0,0 @@
## Changes to the language {#language}
<!-- https://go.dev/issue/45624 --->
The built-in `new` function, which creates a new variable, now allows
its operand to be an expression, specifying the initial value of the
variable.
This feature is particularly useful when working with serialization
packages such as `encoding/json` or protocol buffers that use a
pointer to represent an optional value, as it enables an optional
field to be populated in a simple expression, for example:
```go
import "encoding/json"
type Person struct {
Name string `json:"name"`
Age *int `json:"age"` // age if known; nil otherwise
}
func personJSON(name string, born time.Time) ([]byte, error) {
return json.Marshal(Person{
Name: name,
Age: new(yearsSince(born)),
})
}
func yearsSince(t time.Time) int {
return int(time.Since(t).Hours() / (365.25 * 24)) // approximately
}
```


@ -1,21 +0,0 @@
## Tools {#tools}
### Go command {#go-command}
<!-- go.dev/issue/74667 -->
`cmd/doc`, and `go tool doc` have been deleted. `go doc` can be used as
a replacement for `go tool doc`: it takes the same flags and arguments and
has the same behavior.
<!-- go.dev/issue/75432 -->
The `go fix` command, following the pattern of `go vet` in Go 1.10,
now uses the Go analysis framework (`golang.org/x/tools/go/analysis`).
This means the same analyzers that provide diagnostics in `go vet`
can be used to suggest and apply fixes in `go fix`.
The `go fix` command's historical fixers, all of which were obsolete,
have been removed and replaced by a suite of new analyzers that
offer fixes to use newer features of the language and library.
<!-- I'll write a blog post that discusses this at length. --adonovan -->
### Cgo {#cgo}


@ -1 +0,0 @@
## Runtime {#runtime}


@ -1,16 +0,0 @@
## Compiler {#compiler}
## Assembler {#assembler}
## Linker {#linker}
On 64-bit ARM-based Windows (the `windows/arm64` port), the linker now supports internal
linking mode of cgo programs, which can be requested with the
`-ldflags=-linkmode=internal` flag.
## Bootstrap {#bootstrap}
<!-- go.dev/issue/69315 -->
As mentioned in the [Go 1.24 release notes](/doc/go1.24#bootstrap), Go 1.26 now requires
Go 1.24.6 or later for bootstrap.
We expect that Go 1.28 will require a minor release of Go 1.26 or later for bootstrap.


@ -1,2 +0,0 @@
## Standard library {#library}


@ -1,10 +0,0 @@
### Minor changes to the library {#minor_library_changes}
#### go/types
The `Var.Kind` method returns an enumeration of type `VarKind` that
classifies the variable (package-level, local, receiver, parameter,
result, or struct field). See issue #70250.
Callers of `NewVar` or `NewParam` are encouraged to call `Var.SetKind`
to ensure that this attribute is set correctly in all cases.


@ -1 +0,0 @@
API changes and other small changes to the standard library go here.


@ -1,2 +0,0 @@
The new [Buffer.Peek] method returns the next n bytes from the buffer without
advancing it.
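A minimal sketch of the method described above (illustrative, not part of the commit):

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	buf := bytes.NewBufferString("hello world")

	// Peek returns the next 5 bytes without consuming them.
	head, err := buf.Peek(5)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", head) // hello
	fmt.Println(buf.Len())   // still 11; the buffer did not advance
}
```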


@ -1,2 +0,0 @@
The new [Encapsulator] and [Decapsulator] interfaces allow accepting abstract
KEM encapsulation or decapsulation keys.


@ -1,2 +0,0 @@
The new [KeyExchanger] interface, implemented by [PrivateKey], makes it possible
to accept abstract ECDH private keys, e.g. those implemented in hardware.


@ -1 +0,0 @@
The `big.Int` fields of [PublicKey] and [PrivateKey] are now deprecated.


@ -1,3 +0,0 @@
The new [DecapsulationKey768.Encapsulator] and
[DecapsulationKey1024.Encapsulator] methods implement the new
[crypto.Decapsulator] interface.


@ -1,3 +0,0 @@
The new [crypto/mlkem/mlkemtest] package exposes the [Encapsulate768] and
[Encapsulate1024] functions which implement derandomized ML-KEM encapsulation,
for use with known-answer tests.


@ -1,5 +0,0 @@
If [PrivateKey] fields are modified after calling [PrivateKey.Precompute],
[PrivateKey.Validate] now fails.
[PrivateKey.D] is now checked for consistency with precomputed values, even if
it is not used.


@ -1,2 +0,0 @@
Unsafe PKCS #1 v1.5 encryption padding (implemented by [EncryptPKCS1v15],
[DecryptPKCS1v15], and [DecryptPKCS1v15SessionKey]) is now deprecated.


@ -1,2 +0,0 @@
The [QUICConn] type used by QUIC implementations includes new event
for reporting TLS handshake errors.


@ -1 +0,0 @@
A database driver may implement [RowsColumnScanner] to entirely override `Scan` behavior.


@ -1,4 +0,0 @@
Additional `R_LARCH_*` constants from [LoongArch ELF psABI v20250521][laelf-20250521]
(global version v2.40) are defined for use with LoongArch systems.
[laelf-20250521]: https://github.com/loongson/la-abi-specs/blob/v2.40/laelf.adoc


@ -1,2 +0,0 @@
The new [AsType] function is a generic version of [As]. It is type-safe, faster,
and, in most cases, easier to use.
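A short usage sketch (illustrative, not part of the commit), following the `AsType` signature recorded in api/go1.26.txt:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("does-not-exist")

	// Generic, type-safe counterpart to errors.As: no target pointer needed.
	if perr, ok := errors.AsType[*fs.PathError](err); ok {
		fmt.Println("op:", perr.Op, "path:", perr.Path)
	}
}
```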


@ -1,4 +0,0 @@
The new [ParseDirective] function parses [directive
comments](/doc/comment#Syntax), which are comments such as `//go:generate`.
Source code tools can support their own directive comments and this new API
should help them implement the conventional syntax.
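A hedged sketch of how the new API might be used (illustrative, not part of the commit); whether the argument includes the leading `//` is an assumption here, and only the `Tool`, `Name`, and `Args` fields from the API listing are relied on.

```go
package main

import (
	"fmt"
	"go/ast"
	"go/token"
)

func main() {
	// Assumption: ParseDirective is given the text of a directive comment.
	d, ok := ast.ParseDirective(token.NoPos, "//go:generate stringer -type=Pill")
	if !ok {
		fmt.Println("not a directive comment")
		return
	}
	fmt.Println(d.Tool, d.Name, d.Args) // expected roughly: go generate stringer -type=Pill
}
```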


@ -1,5 +0,0 @@
The new [BasicLit.ValueEnd] field records the precise end position of
a literal so that the [BasicLit.End] method can now always return the
correct answer. (Previously it was computed using a heuristic that was
incorrect for multi-line raw string literals in Windows source files,
due to removal of carriage returns.)


@ -1 +0,0 @@
The new [File.End] convenience method returns the file's end position.


@ -1,2 +0,0 @@
The JPEG encoder and decoder have been replaced with new, faster, more accurate implementations.
Code that expects specific bit-for-bit outputs from the encoder or decoder may need to be updated.


@ -1,6 +0,0 @@
The [`NewMultiHandler`](/pkg/log/slog#NewMultiHandler) function creates a
[`MultiHandler`](/pkg/log/slog#MultiHandler) that invokes all the given Handlers.
Its `Enable` method reports whether any of the handlers' `Enabled` methods
return true.
Its `Handle`, `WithAttr` and `WithGroup` methods call the corresponding method
on each of the enabled handlers.
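A brief sketch of the fan-out behavior described above (illustrative, not part of the commit):

```go
package main

import (
	"log/slog"
	"os"
)

func main() {
	textH := slog.NewTextHandler(os.Stdout, nil)
	jsonH := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn})

	// Every record is offered to both handlers; each applies its own level filter.
	logger := slog.New(slog.NewMultiHandler(textH, jsonH))
	logger.Info("starting up")       // text handler only
	logger.Warn("disk space is low") // both handlers
}
```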


@ -1 +0,0 @@
Added context aware dial functions for TCP, UDP, IP and Unix networks.
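A hedged sketch using the `Dialer.DialTCP` signature from api/go1.26.txt (illustrative, not part of the commit); treating a zero `netip.AddrPort` as "no specific local address" is an assumption.

```go
package main

import (
	"context"
	"net"
	"net/netip"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	dst := netip.MustParseAddrPort("192.0.2.10:80")
	var d net.Dialer

	// Assumption: the zero local AddrPort lets the system choose the local address.
	conn, err := d.DialTCP(ctx, "tcp", netip.AddrPort{}, dst)
	if err != nil {
		return
	}
	defer conn.Close()
}
```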


@ -1,4 +0,0 @@
The new
[HTTP2Config.StrictMaxConcurrentRequests](/pkg/net/http#HTTP2Config.StrictMaxConcurrentRequests)
field controls whether a new connection should be opened
if an existing HTTP/2 connection has exceeded its stream limit.


@ -1,2 +0,0 @@
The HTTP client returned by [Server.Client] will now redirect requests for
`example.com` and any subdomains to the server being tested.


@ -1,11 +0,0 @@
The [ReverseProxy.Director] configuration field is deprecated
in favor of [ReverseProxy.Rewrite].
A malicious client can remove headers added by a `Director` function
by designating those headers as hop-by-hop. Since there is no way to address
this problem within the scope of the `Director` API, we added a new
`Rewrite` hook in Go 1.20. `Rewrite` hooks are provided with both the
unmodified inbound request received by the proxy and the outbound request
which will be sent by the proxy.
Since the `Director` hook is fundamentally unsafe, we are now deprecating it.
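For orientation (not part of the commit), a minimal `Rewrite`-based proxy of the kind the note above recommends; `ReverseProxy.Rewrite` and `ProxyRequest.SetURL` have existed since Go 1.20.

```go
package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	target, err := url.Parse("http://backend.internal:8080")
	if err != nil {
		log.Fatal(err)
	}

	proxy := &httputil.ReverseProxy{
		// Rewrite sees both the unmodified inbound request and the outbound
		// request, so headers it sets cannot be stripped as hop-by-hop.
		Rewrite: func(r *httputil.ProxyRequest) {
			r.SetURL(target)
			r.SetXForwarded()
		},
	}
	log.Fatal(http.ListenAndServe(":8000", proxy))
}
```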


@ -1 +0,0 @@
The new [Prefix.Compare] method compares two prefixes.
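A small sketch (illustrative, not part of the commit) of sorting with the new method:

```go
package main

import (
	"fmt"
	"net/netip"
	"slices"
)

func main() {
	prefixes := []netip.Prefix{
		netip.MustParsePrefix("192.168.0.0/16"),
		netip.MustParsePrefix("10.0.0.0/8"),
		netip.MustParsePrefix("10.1.0.0/16"),
	}

	// Compare returns a negative, zero, or positive result, so it can be
	// passed directly to slices.SortFunc as a method expression.
	slices.SortFunc(prefixes, netip.Prefix.Compare)
	fmt.Println(prefixes)
}
```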


@ -1,4 +0,0 @@
[Parse] now rejects malformed URLs containing colons in the host subcomponent,
such as `http://::1/` or `http://localhost:80:80/`.
URLs containing bracketed IPv6 addresses, such as `http://[::1]/` are still accepted.
The new GODEBUG=urlstrictcolons=0 setting restores the old behavior.


@ -1,4 +0,0 @@
The new [Process.WithHandle] method provides access to an internal process
handle on supported platforms (Linux 5.4 or later and Windows). On Linux,
the process handle is a pidfd. The method returns [ErrNoHandle] on unsupported
platforms or when no process handle is available.
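A hedged sketch of calling the new method (illustrative, not part of the commit); the callback semantics follow the description above, and error handling via `errors.Is` is an assumption about idiomatic use.

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"os/exec"
)

func main() {
	cmd := exec.Command("sleep", "1")
	if err := cmd.Start(); err != nil {
		return
	}
	defer cmd.Wait()

	// On Linux 5.4+ the handle is a pidfd; elsewhere ErrNoHandle is returned.
	err := cmd.Process.WithHandle(func(h uintptr) {
		fmt.Println("process handle:", h)
	})
	if errors.Is(err, os.ErrNoHandle) {
		fmt.Println("no process handle available on this platform")
	}
}
```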


@ -1,4 +0,0 @@
On Windows, the [OpenFile] `flag` parameter can now contain any combination of
Windows-specific file flags, such as `FILE_FLAG_OVERLAPPED` and
`FILE_FLAG_SEQUENTIAL_SCAN`, for control of file or device caching behavior,
access modes, and other special-purpose flags.


@ -1,2 +0,0 @@
[NotifyContext] now cancels the returned context with [context.CancelCauseFunc]
and an error indicating which signal was received.
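A short sketch (illustrative, not part of the commit) showing how the cancellation cause described above can be observed:

```go
package main

import (
	"context"
	"fmt"
	"os/signal"
	"syscall"
)

func main() {
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()

	<-ctx.Done()
	// With this change, the cause identifies which signal triggered cancellation.
	fmt.Println("shutting down:", context.Cause(ctx))
}
```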


@ -1,18 +0,0 @@
The new methods [T.ArtifactDir], [B.ArtifactDir], and [F.ArtifactDir]
return a directory in which to write test output files (artifacts).
When the `-artifacts` flag is provided to `go test`,
this directory will be located under the output directory
(specified with `-outputdir`, or the current directory by default).
Otherwise, artifacts are stored in a temporary directory
which is removed after the test completes.
The first call to `ArtifactDir` when `-artifacts` is provided
writes the location of the directory to the test log.
For example, in a test named `TestArtifacts`,
`t.ArtifactDir()` emits:
```
=== ARTIFACTS Test /path/to/artifact/dir
```
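A minimal test sketch using the new method (illustrative, not part of the commit):

```go
package artifacts_test

import (
	"os"
	"path/filepath"
	"testing"
)

func TestArtifacts(t *testing.T) {
	// With `go test -artifacts`, this directory is preserved under -outputdir;
	// otherwise it is a temporary directory removed after the test finishes.
	dir := t.ArtifactDir()

	out := filepath.Join(dir, "report.txt")
	if err := os.WriteFile(out, []byte("test output\n"), 0o644); err != nil {
		t.Fatal(err)
	}
}
```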


@ -1,13 +0,0 @@
## Ports {#ports}
### Darwin
<!-- go.dev/issue/75836 -->
Go 1.26 is the last release that will run on macOS 12 Monterey. Go 1.27 will require macOS 13 Ventura or later.
### Windows
<!-- go.dev/issue/71671 -->
As [announced](/doc/go1.25#windows) in the Go 1.25 release notes, the [broken](/doc/go1.24#windows) 32-bit windows/arm port (`GOOS=windows` `GOARCH=arm`) is removed.


@ -33,13 +33,17 @@ lable2:
MOVV R4, R5 // 85001500
MOVBU R4, R5 // 85fc4303
SUB R4, R5, R6 // a6101100
SUBW R4, R5, R6 // a6101100
SUBV R4, R5, R6 // a6901100
ADD R4, R5, R6 // a6101000
ADDW R4, R5, R6 // a6101000
ADDV R4, R5, R6 // a6901000
AND R4, R5, R6 // a6901400
SUB R4, R5 // a5101100
SUBW R4, R5 // a5101100
SUBV R4, R5 // a5901100
ADD R4, R5 // a5101000
ADDW R4, R5 // a5101000
ADDV R4, R5 // a5901000
AND R4, R5 // a5901400
NEGW R4, R5 // 05101100
@ -115,6 +119,8 @@ lable2:
MOVV $1, R4 // 04048003
ADD $-1, R4, R5 // 85fcbf02
ADD $-1, R4 // 84fcbf02
ADDW $-1, R4, R5 // 85fcbf02
ADDW $-1, R4 // 84fcbf02
ADDV $-1, R4, R5 // 85fcff02
ADDV $-1, R4 // 84fcff02
AND $1, R4, R5 // 85044003
@ -165,6 +171,8 @@ lable2:
// mul
MUL R4, R5 // a5101c00
MUL R4, R5, R6 // a6101c00
MULW R4, R5 // a5101c00
MULW R4, R5, R6 // a6101c00
MULV R4, R5 // a5901d00
MULV R4, R5, R6 // a6901d00
MULVU R4, R5 // a5901d00
@ -191,20 +199,26 @@ lable2:
MOVHU R4, 1(R5) // a4044029
MOVHU y+8(FP), R4 // 6440402a
MOVHU 1(R5), R4 // a404402a
MULU R4, R5 // a5101c00
MULU R4, R5, R6 // a6101c00
MULH R4, R5 // a5901c00
MULH R4, R5, R6 // a6901c00
MULHU R4, R5 // a5101d00
MULHU R4, R5, R6 // a6101d00
REM R4, R5 // a5902000
REM R4, R5, R6 // a6902000
REMW R4, R5 // a5902000
REMW R4, R5, R6 // a6902000
REMU R4, R5 // a5902100
REMU R4, R5, R6 // a6902100
REMWU R4, R5 // a5902100
REMWU R4, R5, R6 // a6902100
DIV R4, R5 // a5102000
DIV R4, R5, R6 // a6102000
DIVW R4, R5 // a5102000
DIVW R4, R5, R6 // a6102000
DIVU R4, R5 // a5102100
DIVU R4, R5, R6 // a6102100
DIVWU R4, R5 // a5102100
DIVWU R4, R5, R6 // a6102100
SRLV R4, R5 // a5101900
SRLV R4, R5, R6 // a6101900
SRLV $4, R4, R5 // 85104500
@ -1075,6 +1089,150 @@ lable2:
XVMULWODVWUW X1, X2, X3 // 4304a374
XVMULWODQVUV X1, X2, X3 // 4384a374
// [X]VADDW{EV/OD}.{H.B/W.H/D.W/Q.D} instructions
VADDWEVHB V1, V2, V3 // 43041e70
VADDWEVWH V1, V2, V3 // 43841e70
VADDWEVVW V1, V2, V3 // 43041f70
VADDWEVQV V1, V2, V3 // 43841f70
VADDWODHB V1, V2, V3 // 43042270
VADDWODWH V1, V2, V3 // 43842270
VADDWODVW V1, V2, V3 // 43042370
VADDWODQV V1, V2, V3 // 43842370
XVADDWEVHB X1, X2, X3 // 43041e74
XVADDWEVWH X1, X2, X3 // 43841e74
XVADDWEVVW X1, X2, X3 // 43041f74
XVADDWEVQV X1, X2, X3 // 43841f74
XVADDWODHB X1, X2, X3 // 43042274
XVADDWODWH X1, X2, X3 // 43842274
XVADDWODVW X1, X2, X3 // 43042374
XVADDWODQV X1, X2, X3 // 43842374
// [X]VSUBW{EV/OD}.{H.B/W.H/D.W/Q.D} instructions
VSUBWEVHB V1, V2, V3 // 43042070
VSUBWEVWH V1, V2, V3 // 43842070
VSUBWEVVW V1, V2, V3 // 43042170
VSUBWEVQV V1, V2, V3 // 43842170
VSUBWODHB V1, V2, V3 // 43042470
VSUBWODWH V1, V2, V3 // 43842470
VSUBWODVW V1, V2, V3 // 43042570
VSUBWODQV V1, V2, V3 // 43842570
XVSUBWEVHB X1, X2, X3 // 43042074
XVSUBWEVWH X1, X2, X3 // 43842074
XVSUBWEVVW X1, X2, X3 // 43042174
XVSUBWEVQV X1, X2, X3 // 43842174
XVSUBWODHB X1, X2, X3 // 43042474
XVSUBWODWH X1, X2, X3 // 43842474
XVSUBWODVW X1, X2, X3 // 43042574
XVSUBWODQV X1, X2, X3 // 43842574
// [X]VADDW{EV/OD}.{H.B/W.H/D.W/Q.D}U instructions
VADDWEVHBU V1, V2, V3 // 43042e70
VADDWEVWHU V1, V2, V3 // 43042f70
VADDWEVVWU V1, V2, V3 // 43042f70
VADDWEVQVU V1, V2, V3 // 43842f70
VADDWODHBU V1, V2, V3 // 43043270
VADDWODWHU V1, V2, V3 // 43843270
VADDWODVWU V1, V2, V3 // 43043370
VADDWODQVU V1, V2, V3 // 43843370
XVADDWEVHBU X1, X2, X3 // 43042e74
XVADDWEVWHU X1, X2, X3 // 43842e74
XVADDWEVVWU X1, X2, X3 // 43042f74
XVADDWEVQVU X1, X2, X3 // 43842f74
XVADDWODHBU X1, X2, X3 // 43043274
XVADDWODWHU X1, X2, X3 // 43843274
XVADDWODVWU X1, X2, X3 // 43043374
XVADDWODQVU X1, X2, X3 // 43843374
// [X]VSUBW{EV/OD}.{H.B/W.H/D.W/Q.D}U instructions
VSUBWEVHBU V1, V2, V3 // 43043070
VSUBWEVWHU V1, V2, V3 // 43843070
VSUBWEVVWU V1, V2, V3 // 43043170
VSUBWEVQVU V1, V2, V3 // 43843170
VSUBWODHBU V1, V2, V3 // 43043470
VSUBWODWHU V1, V2, V3 // 43843470
VSUBWODVWU V1, V2, V3 // 43043570
VSUBWODQVU V1, V2, V3 // 43843570
XVSUBWEVHBU X1, X2, X3 // 43043074
XVSUBWEVWHU X1, X2, X3 // 43843074
XVSUBWEVVWU X1, X2, X3 // 43043174
XVSUBWEVQVU X1, X2, X3 // 43843174
XVSUBWODHBU X1, X2, X3 // 43043474
XVSUBWODWHU X1, X2, X3 // 43843474
XVSUBWODVWU X1, X2, X3 // 43043574
XVSUBWODQVU X1, X2, X3 // 43843574
// [X]VMADD.{B/H/W/D}, [X]VMSUB.{B/H/W/D} instructions
VMADDB V1, V2, V3 // 4304a870
VMADDH V1, V2, V3 // 4384a870
VMADDW V1, V2, V3 // 4304a970
VMADDV V1, V2, V3 // 4384a970
VMSUBB V1, V2, V3 // 4304aa70
VMSUBH V1, V2, V3 // 4384aa70
VMSUBW V1, V2, V3 // 4304ab70
VMSUBV V1, V2, V3 // 4384ab70
XVMADDB X1, X2, X3 // 4304a874
XVMADDH X1, X2, X3 // 4384a874
XVMADDW X1, X2, X3 // 4304a974
XVMADDV X1, X2, X3 // 4384a974
XVMSUBB X1, X2, X3 // 4304aa74
XVMSUBH X1, X2, X3 // 4384aa74
XVMSUBW X1, X2, X3 // 4304ab74
XVMSUBV X1, X2, X3 // 4384ab74
// [X]VMADDW{EV/OD}.{H.B/W.H/D.W/Q.D} instructions
VMADDWEVHB V1, V2, V3 // 4304ac70
VMADDWEVWH V1, V2, V3 // 4384ac70
VMADDWEVVW V1, V2, V3 // 4304ad70
VMADDWEVQV V1, V2, V3 // 4384ad70
VMADDWODHB V1, V2, V3 // 4304ae70
VMADDWODWH V1, V2, V3 // 4384ae70
VMADDWODVW V1, V2, V3 // 4304af70
VMADDWODQV V1, V2, V3 // 4384af70
XVMADDWEVHB X1, X2, X3 // 4304ac74
XVMADDWEVWH X1, X2, X3 // 4384ac74
XVMADDWEVVW X1, X2, X3 // 4304ad74
XVMADDWEVQV X1, X2, X3 // 4384ad74
XVMADDWODHB X1, X2, X3 // 4304ae74
XVMADDWODWH X1, X2, X3 // 4384ae74
XVMADDWODVW X1, X2, X3 // 4304af74
XVMADDWODQV X1, X2, X3 // 4384af74
// [X]VMADDW{EV/OD}.{H.B/W.H/D.W/Q.D}U instructions
VMADDWEVHBU V1, V2, V3 // 4304b470
VMADDWEVWHU V1, V2, V3 // 4384b470
VMADDWEVVWU V1, V2, V3 // 4304b570
VMADDWEVQVU V1, V2, V3 // 4384b570
VMADDWODHBU V1, V2, V3 // 4304b670
VMADDWODWHU V1, V2, V3 // 4384b670
VMADDWODVWU V1, V2, V3 // 4304b770
VMADDWODQVU V1, V2, V3 // 4384b770
XVMADDWEVHBU X1, X2, X3 // 4304b474
XVMADDWEVWHU X1, X2, X3 // 4384b474
XVMADDWEVVWU X1, X2, X3 // 4304b574
XVMADDWEVQVU X1, X2, X3 // 4384b574
XVMADDWODHBU X1, X2, X3 // 4304b674
XVMADDWODWHU X1, X2, X3 // 4384b674
XVMADDWODVWU X1, X2, X3 // 4304b774
XVMADDWODQVU X1, X2, X3 // 4384b774
// [X]VMADDW{EV/OD}.{H.BU.B/W.HU.H/D.WU.W/Q.DU.D} instructions
VMADDWEVHBUB V1, V2, V3 // 4304bc70
VMADDWEVWHUH V1, V2, V3 // 4384bc70
VMADDWEVVWUW V1, V2, V3 // 4304bd70
VMADDWEVQVUV V1, V2, V3 // 4384bd70
VMADDWODHBUB V1, V2, V3 // 4304be70
VMADDWODWHUH V1, V2, V3 // 4384be70
VMADDWODVWUW V1, V2, V3 // 4304bf70
VMADDWODQVUV V1, V2, V3 // 4384bf70
XVMADDWEVHBUB X1, X2, X3 // 4304bc74
XVMADDWEVWHUH X1, X2, X3 // 4384bc74
XVMADDWEVVWUW X1, X2, X3 // 4304bd74
XVMADDWEVQVUV X1, X2, X3 // 4384bd74
XVMADDWODHBUB X1, X2, X3 // 4304be74
XVMADDWODWHUH X1, X2, X3 // 4384be74
XVMADDWODVWUW X1, X2, X3 // 4304bf74
XVMADDWODQVUV X1, X2, X3 // 4384bf74
// [X]VSHUF4I.{B/H/W/D} instructions
VSHUF4IB $0, V2, V1 // 41009073
VSHUF4IB $16, V2, V1 // 41409073
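The expected-output comments in these encoding tests are the little-endian machine-code bytes of each instruction, and the three-register LSX/LASX forms above pack vd, vj, and vk into the low 15 bits of the word. A minimal standalone sketch of that packing; the opcode constant is inferred from the VADDWEVHB test line above (43041e70 == 0x701e0443), not taken from the assembler's tables.

package main

import (
    "encoding/binary"
    "fmt"
)

// encode3R packs a LoongArch three-register vector instruction as
// opcode | vk<<10 | vj<<5 | vd. The opcode value used in main is an
// illustration inferred from the test expectations above.
func encode3R(opcode, vd, vj, vk uint32) []byte {
    insn := opcode | vk<<10 | vj<<5 | vd
    b := make([]byte, 4)
    binary.LittleEndian.PutUint32(b, insn)
    return b
}

func main() {
    // VADDWEVHB V1, V2, V3 encodes vd=3, vj=2, vk=1.
    fmt.Printf("% x\n", encode3R(0x701e0000, 3, 2, 1)) // 43 04 1e 70
}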


@ -21,6 +21,10 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
ADD $4096, R4, R5 // 3e00001485781000
ADD $65536, R4 // 1e02001484781000
ADD $4096, R4 // 3e00001484781000
ADDW $65536, R4, R5 // 1e02001485781000
ADDW $4096, R4, R5 // 3e00001485781000
ADDW $65536, R4 // 1e02001484781000
ADDW $4096, R4 // 3e00001484781000
ADDV $65536, R4, R5 // 1e02001485f81000
ADDV $4096, R4, R5 // 3e00001485f81000
ADDV $65536, R4 // 1e02001484f81000
@ -37,10 +41,6 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
SGTU $4096, R4, R5 // 3e00001485f81200
SGTU $65536, R4 // 1e02001484f81200
SGTU $4096, R4 // 3e00001484f81200
ADDU $65536, R4, R5 // 1e02001485781000
ADDU $4096, R4, R5 // 3e00001485781000
ADDU $65536, R4 // 1e02001484781000
ADDU $4096, R4 // 3e00001484781000
ADDVU $65536, R4, R5 // 1e02001485f81000
ADDVU $4096, R4, R5 // 3e00001485f81000
ADDVU $65536, R4 // 1e02001484f81000


@ -11,12 +11,16 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
MOVV $4096(R4), R5 // 3e000014de03800385f81000
ADD $74565, R4 // 5e020014de178d0384781000
ADD $4097, R4 // 3e000014de07800384781000
ADDW $74565, R4 // 5e020014de178d0384781000
ADDW $4097, R4 // 3e000014de07800384781000
ADDV $74565, R4 // 5e020014de178d0384f81000
ADDV $4097, R4 // 3e000014de07800384f81000
AND $74565, R4 // 5e020014de178d0384f81400
AND $4097, R4 // 3e000014de07800384f81400
ADD $74565, R4, R5 // 5e020014de178d0385781000
ADD $4097, R4, R5 // 3e000014de07800385781000
ADDW $74565, R4, R5 // 5e020014de178d0385781000
ADDW $4097, R4, R5 // 3e000014de07800385781000
ADDV $74565, R4, R5 // 5e020014de178d0385f81000
ADDV $4097, R4, R5 // 3e000014de07800385f81000
AND $74565, R4, R5 // 5e020014de178d0385f81400
@ -107,10 +111,6 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
SGTU $74565, R4, R5 // 5e020014de178d0385f81200
SGTU $4097, R4 // 3e000014de07800384f81200
SGTU $4097, R4, R5 // 3e000014de07800385f81200
ADDU $74565, R4 // 5e020014de178d0384781000
ADDU $74565, R4, R5 // 5e020014de178d0385781000
ADDU $4097, R4 // 3e000014de07800384781000
ADDU $4097, R4, R5 // 3e000014de07800385781000
ADDVU $4097, R4 // 3e000014de07800384f81000
ADDVU $4097, R4, R5 // 3e000014de07800385f81000
ADDVU $74565, R4 // 5e020014de178d0384f81000
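The 12-byte expected encodings above exercise the expansion used when an ADD/ADDW/ADDV immediate does not fit in 12 bits: the upper 20 bits go into the temporary register via lu12i.w, ori fills in the low 12 bits, and the add is then register-register. A small hedged sketch of that split for $74565 (0x12345); the helper is purely illustrative, not assembler code, and sign-extension corner cases are ignored.

package main

import "fmt"

// splitConst splits a constant the way the sequences above do:
// lu12i.w materializes bits 31:12 and ori supplies bits 11:0.
func splitConst(c uint32) (hi20, lo12 uint32) {
    return c >> 12, c & 0xfff
}

func main() {
    hi, lo := splitConst(74565)
    fmt.Printf("hi20=%#x lo12=%#x\n", hi, lo) // hi20=0x12 lo12=0x345
}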


@ -1871,6 +1871,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// zeroX15 zeroes the X15 register.
func zeroX15(s *ssagen.State) {
if !buildcfg.Experiment.SIMD {
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
return
}
vxorps := func(s *ssagen.State) {
p := s.Prog(x86.AVXORPS)
p.From.Type = obj.TYPE_REG


@ -1322,6 +1322,11 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Name = obj.NAME_EXTERN
// AuxInt encodes how many buffer entries we need.
p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
case ssa.OpARM64LoweredMemEq:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ir.Syms.Memequal
case ssa.OpARM64LoweredPanicBoundsRR, ssa.OpARM64LoweredPanicBoundsRC, ssa.OpARM64LoweredPanicBoundsCR, ssa.OpARM64LoweredPanicBoundsCC:
// Compute the constant we put in the PCData entry for this call.
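The new OpARM64LoweredMemEq case emits a call to runtime.memequal via ir.Syms.Memequal. A hedged example of the kind of source-level comparison the generated-equality-function tests exercise and that the compiler may lower to such a call; whether a given type actually takes that path depends on size thresholds and other heuristics, so treat this as an illustration only.

package main

import "fmt"

// blob is large enough that comparing two values is plausibly lowered to a
// single runtime.memequal call rather than a field-by-field comparison.
// That lowering is a compiler decision, not something this code controls.
type blob [64]byte

func equal(a, b blob) bool {
    return a == b
}

func main() {
    var a, b blob
    b[63] = 1
    fmt.Println(equal(a, a), equal(a, b)) // true false
}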


@ -5,11 +5,7 @@
package base
import (
"fmt"
"os" "os"
"runtime"
"runtime/debug"
"runtime/metrics"
)
var atExitFuncs []func()
@ -29,193 +25,3 @@ func Exit(code int) {
// To enable tracing support (-t flag), set EnableTrace to true.
const EnableTrace = false
// forEachGC calls fn each GC cycle until it returns false.
func forEachGC(fn func() bool) {
type T [32]byte // large enough to avoid runtime's tiny object allocator
var finalizer func(*T)
finalizer = func(p *T) {
if fn() {
runtime.SetFinalizer(p, finalizer)
}
}
finalizer(new(T))
}
// AdjustStartingHeap modifies GOGC so that GC should not occur until the heap
// grows to the requested size. This is intended but not promised, though it
// is true-mostly, depending on when the adjustment occurs and on the
// compiler's input and behavior. Once this size is approximately reached
// GOGC is reset to 100; subsequent GCs may reduce the heap below the requested
// size, but this function does not affect that.
//
// -d=gcadjust=1 enables logging of GOGC adjustment events.
//
// NOTE: If you think this code would help startup time in your own
// application and you decide to use it, please benchmark first to see if it
// actually works for you (it may not: the Go compiler is not typical), and
// whatever the outcome, please leave a comment on bug #56546. This code
// uses supported interfaces, but depends more than we like on
// current+observed behavior of the garbage collector, so if many people need
// this feature, we should consider/propose a better way to accomplish it.
func AdjustStartingHeap(requestedHeapGoal uint64) {
logHeapTweaks := Debug.GCAdjust == 1
mp := runtime.GOMAXPROCS(0)
gcConcurrency := Flag.LowerC
const (
goal = "/gc/heap/goal:bytes"
count = "/gc/cycles/total:gc-cycles"
allocs = "/gc/heap/allocs:bytes"
frees = "/gc/heap/frees:bytes"
)
sample := []metrics.Sample{{Name: goal}, {Name: count}, {Name: allocs}, {Name: frees}}
const (
GOAL = 0
COUNT = 1
ALLOCS = 2
FREES = 3
)
// Assumptions and observations of Go's garbage collector, as of Go 1.17-1.20:
// - the initial heap goal is 4M, by fiat. It is possible for Go to start
// with a heap as small as 512k, so this may change in the future.
// - except for the first heap goal, heap goal is a function of
// observed-live at the previous GC and current GOGC. After the first
// GC, adjusting GOGC immediately updates GOGC; before the first GC,
// adjusting GOGC does not modify goal (but the change takes effect after
// the first GC).
// - the before/after first GC behavior is not guaranteed anywhere, it's
// just behavior, and it's a bad idea to rely on it.
// - we don't know exactly when GC will run, even after we adjust GOGC; the
// first GC may not have happened yet, may have already happened, or may
// be currently in progress, and GCs can start for several reasons.
// - forEachGC above will run the provided function at some delay after each
// GC's mark phase terminates; finalizers are run after marking as the
// spans containing finalizable objects are swept, driven by GC
// background activity and allocation demand.
// - "live at last GC" is not available through the current metrics
// interface. Instead, live is estimated by knowing the adjusted value of
// GOGC and the new heap goal following a GC (this requires knowing that
// at least one GC has occurred):
// estLive = 100 * newGoal / (100 + currentGogc)
// this new value of GOGC
// newGogc = 100*requestedHeapGoal/estLive - 100
// will result in the desired goal. The logging code checks that the
// resulting goal is correct.
// There's a small risk that the finalizer will be slow to run after a GC
// that expands the goal to a huge value, and that this will lead to
// out-of-memory. This doesn't seem to happen; in experiments on a variety
// of machines with a variety of extra loads to disrupt scheduling, the
// worst overshoot observed was 50% past requestedHeapGoal.
metrics.Read(sample)
for _, s := range sample {
if s.Value.Kind() == metrics.KindBad {
// Just return, a slightly slower compilation is a tolerable outcome.
if logHeapTweaks {
fmt.Fprintf(os.Stderr, "GCAdjust: Regret unexpected KindBad for metric %s\n", s.Name)
}
return
}
}
// Tinker with GOGC to make the heap grow rapidly at first.
currentGoal := sample[GOAL].Value.Uint64() // Believe this will be 4MByte or less, perhaps 512k
myGogc := 100 * requestedHeapGoal / currentGoal
if myGogc <= 150 {
return
}
if logHeapTweaks {
sample := append([]metrics.Sample(nil), sample...) // avoid races with GC callback
AtExit(func() {
metrics.Read(sample)
goal := sample[GOAL].Value.Uint64()
count := sample[COUNT].Value.Uint64()
oldGogc := debug.SetGCPercent(100)
if oldGogc == 100 {
fmt.Fprintf(os.Stderr, "GCAdjust: AtExit goal %d gogc %d count %d maxprocs %d gcConcurrency %d\n",
goal, oldGogc, count, mp, gcConcurrency)
} else {
inUse := sample[ALLOCS].Value.Uint64() - sample[FREES].Value.Uint64()
overPct := 100 * (int(inUse) - int(requestedHeapGoal)) / int(requestedHeapGoal)
fmt.Fprintf(os.Stderr, "GCAdjust: AtExit goal %d gogc %d count %d maxprocs %d gcConcurrency %d overPct %d\n",
goal, oldGogc, count, mp, gcConcurrency, overPct)
}
})
}
debug.SetGCPercent(int(myGogc))
adjustFunc := func() bool {
metrics.Read(sample)
goal := sample[GOAL].Value.Uint64()
count := sample[COUNT].Value.Uint64()
if goal <= requestedHeapGoal { // Stay the course
if logHeapTweaks {
fmt.Fprintf(os.Stderr, "GCAdjust: Reuse GOGC adjust, current goal %d, count is %d, current gogc %d\n",
goal, count, myGogc)
}
return true
}
// Believe goal has been adjusted upwards, else it would be less-than-or-equal than requestedHeapGoal
calcLive := 100 * goal / (100 + myGogc)
if 2*calcLive < requestedHeapGoal { // calcLive can exceed requestedHeapGoal!
myGogc = 100*requestedHeapGoal/calcLive - 100
if myGogc > 125 {
// Not done growing the heap.
oldGogc := debug.SetGCPercent(int(myGogc))
if logHeapTweaks {
// Check that the new goal looks right
inUse := sample[ALLOCS].Value.Uint64() - sample[FREES].Value.Uint64()
metrics.Read(sample)
newGoal := sample[GOAL].Value.Uint64()
pctOff := 100 * (int64(newGoal) - int64(requestedHeapGoal)) / int64(requestedHeapGoal)
// Check that the new goal is close to requested. 3% of make.bash fails this test. Why, TBD.
if pctOff < 2 {
fmt.Fprintf(os.Stderr, "GCAdjust: Retry GOGC adjust, current goal %d, count is %d, gogc was %d, is now %d, calcLive %d pctOff %d\n",
goal, count, oldGogc, myGogc, calcLive, pctOff)
} else {
// The GC is being annoying and not giving us the goal that we requested, say more to help understand when/why.
fmt.Fprintf(os.Stderr, "GCAdjust: Retry GOGC adjust, current goal %d, count is %d, gogc was %d, is now %d, calcLive %d pctOff %d inUse %d\n",
goal, count, oldGogc, myGogc, calcLive, pctOff, inUse)
}
}
return true
}
}
// In this case we're done boosting GOGC, set it to 100 and don't set a new finalizer.
oldGogc := debug.SetGCPercent(100)
// inUse helps estimate how late the finalizer ran; at the instant the previous GC ended,
// it was (in theory) equal to the previous GC's heap goal. In a growing heap it is
// expected to grow to the new heap goal.
inUse := sample[ALLOCS].Value.Uint64() - sample[FREES].Value.Uint64()
overPct := 100 * (int(inUse) - int(requestedHeapGoal)) / int(requestedHeapGoal)
if logHeapTweaks {
fmt.Fprintf(os.Stderr, "GCAdjust: Reset GOGC adjust, old goal %d, count is %d, gogc was %d, calcLive %d inUse %d overPct %d\n",
goal, count, oldGogc, calcLive, inUse, overPct)
}
return false
}
forEachGC(adjustFunc)
}


@ -32,12 +32,16 @@ type DebugFlags struct {
DwarfInl int `help:"print information about DWARF inlined function creation"`
EscapeMutationsCalls int `help:"print extra escape analysis diagnostics about mutations and calls" concurrent:"ok"`
EscapeDebug int `help:"print information about escape analysis and resulting optimizations" concurrent:"ok"`
EscapeAlias int `help:"print information about alias analysis" concurrent:"ok"`
EscapeAliasCheck int `help:"enable additional validation for alias analysis" concurrent:"ok"`
Export int `help:"print export data"`
FIPSHash string `help:"hash value for FIPS debugging" concurrent:"ok"`
Fmahash string `help:"hash value for use in debugging platform-dependent multiply-add use" concurrent:"ok"`
FreeAppend int `help:"insert frees of append results when proven safe (0 disabled, 1 enabled, 2 enabled + log)" concurrent:"ok"`
GCAdjust int `help:"log adjustments to GOGC" concurrent:"ok"`
GCCheck int `help:"check heap/gc use by compiler" concurrent:"ok"`
GCProg int `help:"print dump of GC programs"`
GCStart int `help:"specify \"starting\" compiler's heap size in MiB" concurrent:"ok"`
Gossahash string `help:"hash value for use in debugging the compiler"`
InlFuncsWithClosures int `help:"allow functions with closures to be inlined" concurrent:"ok"`
InlStaticInit int `help:"allow static initialization of inlined calls" concurrent:"ok"`


@ -182,6 +182,7 @@ func ParseFlags() {
Debug.AlignHot = 1
Debug.InlFuncsWithClosures = 1
Debug.InlStaticInit = 1
Debug.FreeAppend = 1
Debug.PGOInline = 1
Debug.PGODevirtualize = 2
Debug.SyncFrames = -1 // disable sync markers by default


@ -0,0 +1,272 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package base
import (
"fmt"
"os"
"runtime"
"runtime/debug"
"runtime/metrics"
"sync"
)
// forEachGC calls fn each GC cycle until it returns false.
func forEachGC(fn func() bool) {
type T [32]byte // large enough to avoid runtime's tiny object allocator
var finalizer func(*T)
finalizer = func(p *T) {
if fn() {
runtime.SetFinalizer(p, finalizer)
}
}
finalizer(new(T))
}
// AdjustStartingHeap modifies GOGC so that GC should not occur until the heap
// grows to the requested size. This is intended but not promised, though it
// is true-mostly, depending on when the adjustment occurs and on the
// compiler's input and behavior. Once the live heap is approximately half
// this size, GOGC is reset to its value when AdjustStartingHeap was called;
// subsequent GCs may reduce the heap below the requested size, but this
// function does not affect that.
//
// logHeapTweaks (-d=gcadjust=1) enables logging of GOGC adjustment events.
//
// The temporarily requested GOGC is derated from what would be the "obvious"
// value necessary to hit the starting heap goal because the obvious
// (goal/live-1)*100 value seems to grow RSS a little more than it "should"
// (compared to GOMEMLIMIT, e.g.) and the assumption is that the GC's control
// algorithms are tuned for GOGC near 100, and not tuned for huge values of
// GOGC. Different derating factors apply for "lo" and "hi" values of GOGC;
// lo is below derateBreak, hi is above derateBreak. The derating factors,
// expressed as integer percentages, are derateLoPct and derateHiPct.
// 60-75 is an okay value for derateLoPct, 30-65 seems like a good value for
// derateHiPct, and 600 seems like a good value for derateBreak. If these
// are zero, defaults are used instead.
//
// NOTE: If you think this code would help startup time in your own
// application and you decide to use it, please benchmark first to see if it
// actually works for you (it may not: the Go compiler is not typical), and
// whatever the outcome, please leave a comment on bug #56546. This code
// uses supported interfaces, but depends more than we like on
// current+observed behavior of the garbage collector, so if many people need
// this feature, we should consider/propose a better way to accomplish it.
func AdjustStartingHeap(requestedHeapGoal, derateBreak, derateLoPct, derateHiPct uint64, logHeapTweaks bool) {
mp := runtime.GOMAXPROCS(0)
const (
SHgoal = "/gc/heap/goal:bytes"
SHcount = "/gc/cycles/total:gc-cycles"
SHallocs = "/gc/heap/allocs:bytes"
SHfrees = "/gc/heap/frees:bytes"
)
var sample = []metrics.Sample{{Name: SHgoal}, {Name: SHcount}, {Name: SHallocs}, {Name: SHfrees}}
const (
SH_GOAL = 0
SH_COUNT = 1
SH_ALLOCS = 2
SH_FREES = 3
MB = 1_000_000
)
// These particular magic numbers are designed to make the RSS footprint of -d=gcstart=2000
// resemble that of GOMEMLIMIT=2000MiB GOGC=10000 when building large projects
// (e.g. the Go compiler itself, and Microsoft's typescript-go AST package),
// with the further restriction that these magic numbers did a good job of reducing user-cpu
// for builds at either gcstart=2000 or gcstart=128.
//
// The benchmarking to obtain this was (a version of):
//
// for i in {1..50} ; do
// for what in std cmd/compile cmd/fix cmd/go github.com/microsoft/typescript-go/internal/ast ; do
// whatbase=`basename ${what}`
// for sh in 128 2000 ; do
// for br in 500 600 ; do
// for shlo in 65 70; do
// for shhi in 55 60 ; do
// benchcmd -n=2 ${whatbase} go build -a \
// -gcflags=all=-d=gcstart=${sh},gcstartloderate=${shlo},gcstarthiderate=${shhi},gcstartbreak=${br} \
// ${what} | tee -a startheap${sh}_${br}_${shhi}_${shlo}.bench
// done
// done
// done
// done
// done
// done
//
// benchcmd is "go install github.com/aclements/go-misc/benchcmd@latest"
if derateBreak == 0 {
derateBreak = 600
}
if derateLoPct == 0 {
derateLoPct = 70
}
if derateHiPct == 0 {
derateHiPct = 55
}
gogcDerate := func(myGogc uint64) uint64 {
if myGogc < derateBreak {
return (myGogc * derateLoPct) / 100
}
return (myGogc * derateHiPct) / 100
}
// Assumptions and observations of Go's garbage collector, as of Go 1.17-1.20:
// - the initial heap goal is 4MiB, by fiat. It is possible for Go to start
// with a heap as small as 512k, so this may change in the future.
// - except for the first heap goal, heap goal is a function of
// observed-live at the previous GC and current GOGC. After the first
// GC, adjusting GOGC immediately updates GOGC; before the first GC,
// adjusting GOGC does not modify goal (but the change takes effect after
// the first GC).
// - the before/after first GC behavior is not guaranteed anywhere, it's
// just behavior, and it's a bad idea to rely on it.
// - we don't know exactly when GC will run, even after we adjust GOGC; the
// first GC may not have happened yet, may have already happened, or may
// be currently in progress, and GCs can start for several reasons.
// - forEachGC above will run the provided function at some delay after each
// GC's mark phase terminates; finalizers are run after marking as the
// spans containing finalizable objects are swept, driven by GC
// background activity and allocation demand.
// - "live at last GC" is not available through the current metrics
// interface. Instead, live is estimated by knowing the adjusted value of
// GOGC and the new heap goal following a GC (this requires knowing that
// at least one GC has occurred):
// estLive = 100 * newGoal / (100 + currentGogc)
// this new value of GOGC
// newGogc = 100*requestedHeapGoal/estLive - 100
// will result in the desired goal. The logging code checks that the
// resulting goal is correct.
// There's a small risk that the finalizer will be slow to run after a GC
// that expands the goal to a huge value, and that this will lead to
// out-of-memory. This doesn't seem to happen; in experiments on a variety
// of machines with a variety of extra loads to disrupt scheduling, the
// worst overshoot observed was 50% past requestedHeapGoal.
metrics.Read(sample)
for _, s := range sample {
if s.Value.Kind() == metrics.KindBad {
// Just return, a slightly slower compilation is a tolerable outcome.
if logHeapTweaks {
fmt.Fprintf(os.Stderr, "GCAdjust: Regret unexpected KindBad for metric %s\n", s.Name)
}
return
}
}
// Tinker with GOGC to make the heap grow rapidly at first.
currentGoal := sample[SH_GOAL].Value.Uint64() // Believe this will be 4MByte or less, perhaps 512k
myGogc := 100 * requestedHeapGoal / currentGoal
myGogc = gogcDerate(myGogc)
if myGogc <= 125 {
return
}
if logHeapTweaks {
sample := append([]metrics.Sample(nil), sample...) // avoid races with GC callback
AtExit(func() {
metrics.Read(sample)
goal := sample[SH_GOAL].Value.Uint64()
count := sample[SH_COUNT].Value.Uint64()
oldGogc := debug.SetGCPercent(100)
if oldGogc == 100 {
fmt.Fprintf(os.Stderr, "GCAdjust: AtExit goal %dMB gogc %d count %d maxprocs %d\n",
goal/MB, oldGogc, count, mp)
} else {
inUse := sample[SH_ALLOCS].Value.Uint64() - sample[SH_FREES].Value.Uint64()
overPct := 100 * (int(inUse) - int(requestedHeapGoal)) / int(requestedHeapGoal)
fmt.Fprintf(os.Stderr, "GCAdjust: AtExit goal %dMB gogc %d count %d maxprocs %d overPct %d\n",
goal/MB, oldGogc, count, mp, overPct)
}
})
}
originalGOGC := debug.SetGCPercent(int(myGogc))
// forEachGC finalizers ought not overlap, but they could run in separate threads.
// This ought not matter, but just in case it bothers the/a race detector,
// use this mutex.
var forEachGCLock sync.Mutex
adjustFunc := func() bool {
forEachGCLock.Lock()
defer forEachGCLock.Unlock()
metrics.Read(sample)
goal := sample[SH_GOAL].Value.Uint64()
count := sample[SH_COUNT].Value.Uint64()
if goal <= requestedHeapGoal { // Stay the course
if logHeapTweaks {
fmt.Fprintf(os.Stderr, "GCAdjust: Reuse GOGC adjust, current goal %dMB, count is %d, current gogc %d\n",
goal/MB, count, myGogc)
}
return true
}
// Believe goal has been adjusted upwards, else it would be less-than-or-equal to requestedHeapGoal
calcLive := 100 * goal / (100 + myGogc)
if 2*calcLive < requestedHeapGoal { // calcLive can exceed requestedHeapGoal!
myGogc = 100*requestedHeapGoal/calcLive - 100
myGogc = gogcDerate(myGogc)
if myGogc > 125 {
// Not done growing the heap.
oldGogc := debug.SetGCPercent(int(myGogc))
if logHeapTweaks {
// Check that the new goal looks right
inUse := sample[SH_ALLOCS].Value.Uint64() - sample[SH_FREES].Value.Uint64()
metrics.Read(sample)
newGoal := sample[SH_GOAL].Value.Uint64()
pctOff := 100 * (int64(newGoal) - int64(requestedHeapGoal)) / int64(requestedHeapGoal)
// Check that the new goal is close to requested. 3% of make.bash fails this test. Why, TBD.
if pctOff < 2 {
fmt.Fprintf(os.Stderr, "GCAdjust: Retry GOGC adjust, current goal %dMB, count is %d, gogc was %d, is now %d, calcLive %dMB pctOff %d\n",
goal/MB, count, oldGogc, myGogc, calcLive/MB, pctOff)
} else {
// The GC is being annoying and not giving us the goal that we requested, say more to help understand when/why.
fmt.Fprintf(os.Stderr, "GCAdjust: Retry GOGC adjust, current goal %dMB, count is %d, gogc was %d, is now %d, calcLive %dMB pctOff %d inUse %dMB\n",
goal/MB, count, oldGogc, myGogc, calcLive/MB, pctOff, inUse/MB)
}
}
return true
}
}
// In this case we're done boosting GOGC, set it to its original value and don't set a new finalizer.
oldGogc := debug.SetGCPercent(originalGOGC)
// inUse helps estimate how late the finalizer ran; at the instant the previous GC ended,
// it was (in theory) equal to the previous GC's heap goal. In a growing heap it is
// expected to grow to the new heap goal.
if logHeapTweaks {
inUse := sample[SH_ALLOCS].Value.Uint64() - sample[SH_FREES].Value.Uint64()
overPct := 100 * (int(inUse) - int(requestedHeapGoal)) / int(requestedHeapGoal)
fmt.Fprintf(os.Stderr, "GCAdjust: Reset GOGC adjust, old goal %dMB, count is %d, gogc was %d, gogc is now %d, calcLive %dMB inUse %dMB overPct %d\n",
goal/MB, count, oldGogc, originalGOGC, calcLive/MB, inUse/MB, overPct)
}
return false
}
forEachGC(adjustFunc)
}
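The arithmetic described in the comments above (boost GOGC to roughly 100*requestedHeapGoal/currentGoal, then derate on either side of derateBreak) can be reproduced outside the compiler. A minimal sketch using the default derate constants from this file; the function name and the 4MB initial goal are assumptions for illustration, not compiler API.

package main

import "fmt"

// startingGOGC mirrors the boost-then-derate arithmetic from the comments
// above, using the file's default derate constants. Standalone illustration only.
func startingGOGC(requestedHeapGoal, currentGoal uint64) uint64 {
    const (
        derateBreak = 600
        derateLoPct = 70
        derateHiPct = 55
    )
    gogc := 100 * requestedHeapGoal / currentGoal
    if gogc < derateBreak {
        return gogc * derateLoPct / 100
    }
    return gogc * derateHiPct / 100
}

func main() {
    // With a 4MB initial goal and a 128MiB requested starting heap
    // (-d=gcstart=128), the derated boost works out to GOGC=1845,
    // which is applied only because it exceeds the 125 cutoff above.
    fmt.Println(startingGOGC(128<<20, 4_000_000)) // 1845
}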


@ -42,40 +42,50 @@ import (
"cmd/compile/internal/reflectdata" "cmd/compile/internal/reflectdata"
"cmd/compile/internal/typecheck" "cmd/compile/internal/typecheck"
"cmd/compile/internal/types" "cmd/compile/internal/types"
"fmt" "cmd/internal/src"
) )
// getNameFromNode tries to iteratively peel down the node to // getNameFromNode tries to iteratively peel down the node to
// get the name. // get the name.
func getNameFromNode(n ir.Node) *ir.Name { func getNameFromNode(n ir.Node) *ir.Name {
var ret *ir.Name // Tries to iteratively peel down the node to get the names.
if n.Op() == ir.ONAME { for n != nil {
ret = n.(*ir.Name) switch n.Op() {
} else { case ir.ONAME:
// avoid infinite recursion on circular referencing nodes. // Found the name, stop the loop.
seen := map[ir.Node]bool{n: true} return n.(*ir.Name)
var findName func(ir.Node) bool case ir.OSLICE, ir.OSLICE3:
findName = func(a ir.Node) bool { n = n.(*ir.SliceExpr).X
if a.Op() == ir.ONAME { case ir.ODOT:
ret = a.(*ir.Name) n = n.(*ir.SelectorExpr).X
return true case ir.OCONV, ir.OCONVIFACE, ir.OCONVNOP:
n = n.(*ir.ConvExpr).X
case ir.OADDR:
n = n.(*ir.AddrExpr).X
case ir.ODOTPTR:
n = n.(*ir.SelectorExpr).X
case ir.OINDEX, ir.OINDEXMAP:
n = n.(*ir.IndexExpr).X
default:
n = nil
} }
if !seen[a] {
seen[a] = true
return ir.DoChildren(a, findName)
} }
return false return nil
} }
ir.DoChildren(n, findName)
// getAddressableNameFromNode is like getNameFromNode but returns nil if the node is not addressable.
func getAddressableNameFromNode(n ir.Node) *ir.Name {
if name := getNameFromNode(n); name != nil && ir.IsAddressable(name) {
return name
} }
return ret return nil
} }
// keepAliveAt returns a statement that is either curNode, or a // keepAliveAt returns a statement that is either curNode, or a
// block containing curNode followed by a call to runtime.keepAlive for each // block containing curNode followed by a call to runtime.KeepAlive for each
// ONAME in ns. These calls ensure that names in ns will be live until // node in ns. These calls ensure that nodes in ns will be live until
// after curNode's execution. // after curNode's execution.
func keepAliveAt(ns []*ir.Name, curNode ir.Node) ir.Node { func keepAliveAt(ns []ir.Node, curNode ir.Node) ir.Node {
if len(ns) == 0 { if len(ns) == 0 {
return curNode return curNode
} }
@ -92,7 +102,10 @@ func keepAliveAt(ns []*ir.Name, curNode ir.Node) ir.Node {
if n.Sym().IsBlank() {
continue
}
arg := ir.NewConvExpr(pos, ir.OCONV, types.Types[types.TINTER], n)
if !ir.IsAddressable(n) {
base.FatalfAt(n.Pos(), "keepAliveAt: node %v is not addressable", n)
}
arg := ir.NewConvExpr(pos, ir.OCONV, types.Types[types.TUNSAFEPTR], typecheck.NodAddr(n))
if !n.Type().IsInterface() {
srcRType0 := reflectdata.TypePtrAt(pos, n.Type())
arg.TypeWord = srcRType0
@ -109,12 +122,12 @@ func keepAliveAt(ns []*ir.Name, curNode ir.Node) ir.Node {
return ir.NewBlockStmt(pos, calls)
}
func debugName(name *ir.Name, line string) {
if base.Flag.LowerM > 0 {
func debugName(name *ir.Name, pos src.XPos) {
if base.Flag.LowerM > 1 {
if name.Linksym() != nil {
fmt.Printf("%v: %s will be kept alive\n", line, name.Linksym().Name)
base.WarnfAt(pos, "%s will be kept alive", name.Linksym().Name)
} else {
fmt.Printf("%v: expr will be kept alive\n", line)
base.WarnfAt(pos, "expr will be kept alive")
}
}
}
@ -127,31 +140,52 @@ func preserveStmt(curFn *ir.Func, stmt ir.Node) (ret ir.Node) {
switch n := stmt.(type) {
case *ir.AssignStmt:
// Peel down struct and slice indexing to get the names
name := getNameFromNode(n.X)
name := getAddressableNameFromNode(n.X)
if name != nil {
debugName(name, ir.Line(stmt))
ret = keepAliveAt([]*ir.Name{name}, n)
debugName(name, n.Pos())
ret = keepAliveAt([]ir.Node{name}, n)
} else if deref := n.X.(*ir.StarExpr); deref != nil {
ret = keepAliveAt([]ir.Node{deref}, n)
if base.Flag.LowerM > 1 {
base.WarnfAt(n.Pos(), "dereference will be kept alive")
}
} else if base.Flag.LowerM > 1 {
base.WarnfAt(n.Pos(), "expr is unknown to bloop pass")
}
case *ir.AssignListStmt:
names := []*ir.Name{}
ns := []ir.Node{}
for _, lhs := range n.Lhs {
name := getNameFromNode(lhs)
name := getAddressableNameFromNode(lhs)
if name != nil {
debugName(name, ir.Line(stmt))
names = append(names, name)
debugName(name, n.Pos())
ns = append(ns, name)
} else if deref := lhs.(*ir.StarExpr); deref != nil {
ns = append(ns, deref)
if base.Flag.LowerM > 1 {
base.WarnfAt(n.Pos(), "dereference will be kept alive")
}
} else if base.Flag.LowerM > 1 {
base.WarnfAt(n.Pos(), "expr is unknown to bloop pass")
}
}
ret = keepAliveAt(names, n)
ret = keepAliveAt(ns, n)
case *ir.AssignOpStmt:
name := getNameFromNode(n.X)
name := getAddressableNameFromNode(n.X)
if name != nil {
debugName(name, ir.Line(stmt))
ret = keepAliveAt([]*ir.Name{name}, n)
debugName(name, n.Pos())
ret = keepAliveAt([]ir.Node{name}, n)
} else if deref := n.X.(*ir.StarExpr); deref != nil {
ret = keepAliveAt([]ir.Node{deref}, n)
if base.Flag.LowerM > 1 {
base.WarnfAt(n.Pos(), "dereference will be kept alive")
}
} else if base.Flag.LowerM > 1 {
base.WarnfAt(n.Pos(), "expr is unknown to bloop pass")
}
case *ir.CallExpr:
names := []*ir.Name{}
curNode := stmt
if n.Fun != nil && n.Fun.Type() != nil && n.Fun.Type().NumResults() != 0 {
ns := []ir.Node{}
// This function's results are not assigned, assign them to
// auto tmps and then keepAliveAt these autos.
// Note: markStmt assumes the context that it's called - this CallExpr is
@ -161,7 +195,7 @@ func preserveStmt(curFn *ir.Func, stmt ir.Node) (ret ir.Node) {
for i, res := range results {
tmp := typecheck.TempAt(n.Pos(), curFn, res.Type)
lhs[i] = tmp
names = append(names, tmp)
ns = append(ns, tmp)
}
// Create an assignment statement.
@ -174,33 +208,35 @@ func preserveStmt(curFn *ir.Func, stmt ir.Node) (ret ir.Node) {
if len(results) > 1 {
plural = "s"
}
if base.Flag.LowerM > 0 {
fmt.Printf("%v: function result%s will be kept alive\n", ir.Line(stmt), plural)
if base.Flag.LowerM > 1 {
base.WarnfAt(n.Pos(), "function result%s will be kept alive", plural)
}
ret = keepAliveAt(ns, curNode)
} else {
// This function probably doesn't return anything, keep its args alive.
argTmps := []ir.Node{}
names := []ir.Node{}
for i, a := range n.Args {
if name := getNameFromNode(a); name != nil {
if name := getAddressableNameFromNode(a); name != nil {
// If they are name, keep them alive directly.
debugName(name, ir.Line(stmt))
debugName(name, n.Pos())
names = append(names, name)
} else if a.Op() == ir.OSLICELIT {
// variadic args are encoded as slice literal.
s := a.(*ir.CompLitExpr)
ns := []*ir.Name{}
for i, n := range s.List {
if name := getNameFromNode(n); name != nil {
debugName(name, ir.Line(a))
ns := []ir.Node{}
for i, elem := range s.List {
if name := getAddressableNameFromNode(elem); name != nil {
debugName(name, n.Pos())
ns = append(ns, name)
} else {
// We need a temporary to save this arg.
tmp := typecheck.TempAt(n.Pos(), curFn, n.Type())
argTmps = append(argTmps, typecheck.AssignExpr(ir.NewAssignStmt(n.Pos(), tmp, n)))
tmp := typecheck.TempAt(elem.Pos(), curFn, elem.Type())
argTmps = append(argTmps, typecheck.AssignExpr(ir.NewAssignStmt(elem.Pos(), tmp, elem)))
names = append(names, tmp)
s.List[i] = tmp
if base.Flag.LowerM > 0 {
fmt.Printf("%v: function arg will be kept alive\n", ir.Line(n))
if base.Flag.LowerM > 1 {
base.WarnfAt(n.Pos(), "function arg will be kept alive")
}
}
}
@ -212,8 +248,8 @@ func preserveStmt(curFn *ir.Func, stmt ir.Node) (ret ir.Node) {
argTmps = append(argTmps, typecheck.AssignExpr(ir.NewAssignStmt(n.Pos(), tmp, a)))
names = append(names, tmp)
n.Args[i] = tmp
if base.Flag.LowerM > 0 {
fmt.Printf("%v: function arg will be kept alive\n", ir.Line(stmt))
if base.Flag.LowerM > 1 {
base.WarnfAt(n.Pos(), "function arg will be kept alive")
}
}
}
@ -221,9 +257,9 @@ func preserveStmt(curFn *ir.Func, stmt ir.Node) (ret ir.Node) {
argTmps = append(argTmps, n)
curNode = ir.NewBlockStmt(n.Pos(), argTmps)
}
}
ret = keepAliveAt(names, curNode)
}
}
return
}
@ -282,6 +318,8 @@ func (e editor) edit(n ir.Node) ir.Node {
preserveStmts(e.curFn, n.Body)
case *ir.CommClause:
preserveStmts(e.curFn, n.Body)
case *ir.RangeStmt:
preserveStmts(e.curFn, n.Body)
}
}
return n
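At the source level, the rewritten keepAliveAt amounts to appending a runtime.KeepAlive of each addressable destination's address after the guarded statement (the pass itself wraps typecheck.NodAddr(n) in an OCONV to unsafe.Pointer in compiler IR). A rough, hedged source-level analogy follows; it is not the compiler's actual output.

package main

import (
    "fmt"
    "runtime"
)

// assignAndKeepAlive is only an analogy for what the bloop rewrite arranges:
// the destination of the assignment is kept reachable until after the
// statement has executed, here by handing its address to runtime.KeepAlive.
func assignAndKeepAlive(dst *int, v int) {
    *dst = v
    runtime.KeepAlive(dst)
}

func main() {
    x := new(int)
    assignAndKeepAlive(x, 42)
    fmt.Println(*x) // 42
}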


@ -0,0 +1,528 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package escape
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/internal/src"
"fmt"
"maps"
"path/filepath"
)
type aliasAnalysis struct {
// fn is the function being analyzed.
fn *ir.Func
// candidateSlices are declared slices that
// start unaliased and might still be unaliased.
candidateSlices map[*ir.Name]candidateSlice
// noAliasAppends are appends that have been
// proven to use an unaliased slice.
noAliasAppends []*ir.CallExpr
// loops is a stack of observed loops,
// each with a list of candidate appends.
loops [][]candidateAppend
// State for optional validation checking (doubleCheck mode):
processed map[ir.Node]int // count of times each node was processed, for doubleCheck mode
doubleCheck bool // whether to do doubleCheck mode
}
// candidateSlice tracks information about a declared slice
// that might be unaliased.
type candidateSlice struct {
loopDepth int // depth of loop when slice was declared
}
// candidateAppend tracks information about an OAPPEND that
// might be using an unaliased slice.
type candidateAppend struct {
s *ir.Name // the slice argument in 's = append(s, ...)'
call *ir.CallExpr // the append call
}
// aliasAnalysis looks for specific patterns of slice usage and proves
// that certain appends are operating on non-aliased slices.
//
// This allows us to emit calls to free the backing arrays for certain
// non-aliased slices at runtime when we know the memory is logically dead.
//
// The analysis is conservative, giving up on any operation we do not
// explicitly understand.
func (aa *aliasAnalysis) analyze(fn *ir.Func) {
// Walk the function body to discover slice declarations, their uses,
// and any append that we can prove is using an unaliased slice.
//
// An example is:
//
// var s []T
// for _, v := range input {
// f()
// s = append(s, g(v)) // s cannot be aliased here
// h()
// }
// return s
//
// Here, we can prove that the append to s is operating on an unaliased slice,
// and that conclusion is unaffected by s later being returned and escaping.
//
// In contrast, in this example, the aliasing of s in the loop body means the
// append can be operating on an aliased slice, so we do not record s as unaliased:
//
// var s []T
// var alias []T
// for _, v := range input {
// s = append(s, v) // s is aliased on second pass through loop body
// alias = s
// }
//
// Arbitrary uses of s after an append do not affect the aliasing conclusion
// for that append, but only if the append cannot be revisited at execution time
// via a loop or goto.
//
// We track the loop depth when a slice was declared and verify all uses of a slice
// are non-aliasing until we return to that depth. In other words, we make sure
// we have processed any possible execution-time revisiting of the slice prior
// to making our final determination.
//
// This approach helps for example with nested loops, such as:
//
// var s []int
// for range 10 {
// for range 10 {
// s = append(s, 0) // s is proven as non-aliased here
// }
// }
// alias = s // both loops are complete
//
// Or in contrast:
//
// var s []int
// for range 10 {
// for range 10 {
// s = append(s, 0) // s is treated as aliased here
// }
// alias = s // aliased, and outermost loop cycles back
// }
//
// As we walk the function, we look for things like:
//
// 1. Slice declarations (currently supporting 'var s []T', 's := make([]T, ...)',
// and 's := []T{...}').
// 2. Appends to a slice of the form 's = append(s, ...)'.
// 3. Other uses of the slice, which we treat as potential aliasing outside
// of a few known safe cases.
// 4. A start of a loop, which we track in a stack so that
// any uses of a slice within a loop body are treated as potential
// aliasing, including statements in the loop body after an append.
// Candidate appends are stored in the loop stack at the loop depth of their
// corresponding slice declaration (rather than the loop depth of the append),
// which essentially postpones a decision about the candidate append.
// 5. An end of a loop, which pops the loop stack and allows us to
// conclusively treat candidate appends from the loop body based
// on the loop depth of the slice declaration.
//
// Note that as we pop a candidate append at the end of a loop, we know
// its corresponding slice was unaliased throughout the loop being popped
// if the slice is still in the candidate slice map (without having been
// removed for potential aliasing), and we know we can make a final decision
// about a candidate append if we have returned to the loop depth
// where its slice was declared. In other words, there is no unanalyzed
// control flow that could take us back at execution-time to the
// candidate append in the now analyzed loop. This helps for example
// with nested loops, such as in our examples just above.
//
// We give up on a particular candidate slice if we see any use of it
// that we don't explicitly understand, and we give up on all of
// our candidate slices if we see any goto or label, which could be
// unstructured control flow. (TODO(thepudds): we remove the goto/label
// restriction in a subsequent CL.)
//
// Note that the intended use is to indicate that a slice is safe to pass
// to runtime.freegc, which currently requires that the passed pointer
// point to the base of its heap object.
//
// Therefore, we currently do not allow any re-slicing of the slice, though we could
// potentially allow s[0:x] or s[:x] or similar. (Slice expressions that alter
// the capacity might be possible to allow with freegc changes, though they are
// currently disallowed here like all slice expressions).
//
// TODO(thepudds): we could support the slice being used as non-escaping function call parameter
// but to do that, we need to verify any creation of specials via user code triggers an escape,
// or mail better runtime.freegc support for specials, or have a temporary compile-time solution
// for specials. (Currently, this analysis side-steps specials because any use of a slice
// that might cause a user-created special will cause it to be treated as aliased, and
// separately, runtime.freegc handles profiling-related specials).
// Initialize.
aa.fn = fn
aa.candidateSlices = make(map[*ir.Name]candidateSlice) // slices that might be unaliased
// doubleCheck controls whether we do a sanity check of our processing logic
// by counting each node visited in our main pass, and then comparing those counts
// against a simple walk at the end. The main intent is to help catch missing
// any nodes squirreled away in some spot we forgot to examine in our main pass.
aa.doubleCheck = base.Debug.EscapeAliasCheck > 0
aa.processed = make(map[ir.Node]int)
if base.Debug.EscapeAlias >= 2 {
aa.diag(fn.Pos(), fn, "====== starting func", "======")
}
ir.DoChildren(fn, aa.visit)
for _, call := range aa.noAliasAppends {
if base.Debug.EscapeAlias >= 1 {
base.WarnfAt(call.Pos(), "alias analysis: append using non-aliased slice: %v in func %v",
call, fn)
}
if base.Debug.FreeAppend > 0 {
call.AppendNoAlias = true
}
}
if aa.doubleCheck {
doubleCheckProcessed(fn, aa.processed)
}
}
func (aa *aliasAnalysis) visit(n ir.Node) bool {
if n == nil {
return false
}
if base.Debug.EscapeAlias >= 3 {
fmt.Printf("%-25s alias analysis: visiting node: %12s %-18T %v\n",
fmtPosShort(n.Pos())+":", n.Op().String(), n, n)
}
// As we visit nodes, we want to ensure we handle all children
// without missing any (through ignorance or future changes).
// We do this by counting nodes as we visit them or otherwise
// declare a node to be fully processed.
//
// In particular, we want to ensure we don't miss the use
// of a slice in some expression that might be an aliasing usage.
//
// When doubleCheck is enabled, we compare the counts
// accumulated in our analysis against counts from a trivial walk,
// failing if there is any mismatch.
//
// This call here counts that we have visited this node n
// via our main visit method. (In contrast, some nodes won't
// be visited by the main visit method, but instead will be
// manually marked via countProcessed when we believe we have fully
// dealt with the node).
aa.countProcessed(n)
switch n.Op() {
case ir.ODCL:
decl := n.(*ir.Decl)
if decl.X != nil && decl.X.Type().IsSlice() && decl.X.Class == ir.PAUTO {
s := decl.X
if _, ok := aa.candidateSlices[s]; ok {
base.FatalfAt(n.Pos(), "candidate slice already tracked as candidate: %v", s)
}
if base.Debug.EscapeAlias >= 2 {
aa.diag(n.Pos(), s, "adding candidate slice", "(loop depth: %d)", len(aa.loops))
}
aa.candidateSlices[s] = candidateSlice{loopDepth: len(aa.loops)}
}
// No children aside from the declared ONAME.
aa.countProcessed(decl.X)
return false
case ir.ONAME:
// We are seeing a name we have not already handled in another case,
// so remove any corresponding candidate slice.
if n.Type().IsSlice() {
name := n.(*ir.Name)
_, ok := aa.candidateSlices[name]
if ok {
delete(aa.candidateSlices, name)
if base.Debug.EscapeAlias >= 2 {
aa.diag(n.Pos(), name, "removing candidate slice", "")
}
}
}
// No children.
return false
case ir.OAS2:
n := n.(*ir.AssignListStmt)
aa.analyzeAssign(n, n.Lhs, n.Rhs)
return false
case ir.OAS:
assign := n.(*ir.AssignStmt)
aa.analyzeAssign(n, []ir.Node{assign.X}, []ir.Node{assign.Y})
return false
case ir.OFOR, ir.ORANGE:
aa.visitList(n.Init())
if n.Op() == ir.ORANGE {
// TODO(thepudds): previously we visited this range expression
// in the switch just below, after pushing the loop. This current placement
// is more correct, but generate a test or find an example in stdlib or similar
// where it matters. (Our current tests do not complain.)
aa.visit(n.(*ir.RangeStmt).X)
}
// Push a new loop.
aa.loops = append(aa.loops, nil)
// Process the loop.
switch n.Op() {
case ir.OFOR:
forstmt := n.(*ir.ForStmt)
aa.visit(forstmt.Cond)
aa.visitList(forstmt.Body)
aa.visit(forstmt.Post)
case ir.ORANGE:
rangestmt := n.(*ir.RangeStmt)
aa.visit(rangestmt.Key)
aa.visit(rangestmt.Value)
aa.visitList(rangestmt.Body)
default:
base.Fatalf("loop not OFOR or ORANGE: %v", n)
}
// Pop the loop.
var candidateAppends []candidateAppend
candidateAppends, aa.loops = aa.loops[len(aa.loops)-1], aa.loops[:len(aa.loops)-1]
for _, a := range candidateAppends {
// We are done with the loop, so we can validate any candidate appends
// that have not had their slice removed yet. We know a slice is unaliased
// throughout the loop if the slice is still in the candidate slice map.
if cs, ok := aa.candidateSlices[a.s]; ok {
if cs.loopDepth == len(aa.loops) {
// We've returned to the loop depth where the slice was declared and
// hence made it all the way through any loops that started after
// that declaration.
if base.Debug.EscapeAlias >= 2 {
aa.diag(n.Pos(), a.s, "proved non-aliased append",
"(completed loop, decl at depth: %d)", cs.loopDepth)
}
aa.noAliasAppends = append(aa.noAliasAppends, a.call)
} else if cs.loopDepth < len(aa.loops) {
if base.Debug.EscapeAlias >= 2 {
aa.diag(n.Pos(), a.s, "cannot prove non-aliased append",
"(completed loop, decl at depth: %d)", cs.loopDepth)
}
} else {
panic("impossible: candidate slice loopDepth > current loop depth")
}
}
}
return false
case ir.OLEN, ir.OCAP:
n := n.(*ir.UnaryExpr)
if n.X.Op() == ir.ONAME {
// This does not disqualify a candidate slice.
aa.visitList(n.Init())
aa.countProcessed(n.X)
} else {
ir.DoChildren(n, aa.visit)
}
return false
case ir.OCLOSURE:
// Give up on all our in-progress slices.
closure := n.(*ir.ClosureExpr)
if base.Debug.EscapeAlias >= 2 {
aa.diag(n.Pos(), closure.Func, "clearing all in-progress slices due to OCLOSURE",
"(was %d in-progress slices)", len(aa.candidateSlices))
}
clear(aa.candidateSlices)
return ir.DoChildren(n, aa.visit)
case ir.OLABEL, ir.OGOTO:
// Give up on all our in-progress slices.
if base.Debug.EscapeAlias >= 2 {
aa.diag(n.Pos(), n, "clearing all in-progress slices due to label or goto",
"(was %d in-progress slices)", len(aa.candidateSlices))
}
clear(aa.candidateSlices)
return false
default:
return ir.DoChildren(n, aa.visit)
}
}
func (aa *aliasAnalysis) visitList(nodes []ir.Node) {
for _, n := range nodes {
aa.visit(n)
}
}
// analyzeAssign evaluates the assignment dsts... = srcs...
//
// assign is an *ir.AssignStmt or *ir.AssignListStmt.
func (aa *aliasAnalysis) analyzeAssign(assign ir.Node, dsts, srcs []ir.Node) {
aa.visitList(assign.Init())
for i := range dsts {
dst := dsts[i]
src := srcs[i]
if dst.Op() != ir.ONAME || !dst.Type().IsSlice() {
// Nothing for us to do aside from visiting the remaining children.
aa.visit(dst)
aa.visit(src)
continue
}
// We have a slice being assigned to an ONAME.
// Check for simple zero value assignments to an ONAME, which we ignore.
if src == nil {
aa.countProcessed(dst)
continue
}
if base.Debug.EscapeAlias >= 4 {
srcfn := ""
if src.Op() == ir.ONAME {
srcfn = fmt.Sprintf("%v.", src.Name().Curfn)
}
aa.diag(assign.Pos(), assign, "visiting slice assignment", "%v.%v = %s%v (%s %T = %s %T)",
dst.Name().Curfn, dst, srcfn, src, dst.Op().String(), dst, src.Op().String(), src)
}
// Now check what we have on the RHS.
switch src.Op() {
// Cases:
// Check for s := make([]T, ...) or s := []T{...}, along with the '=' version
// of those which does not alias s as long as s is not used in the make.
//
// TODO(thepudds): we need to be sure that 's := []T{1,2,3}' does not end up backed by a
// global static. Ad-hoc testing indicates that example and similar seem to be
// stack allocated, but that was not exhaustive testing. We do have runtime.freegc
// able to throw if it finds a global static, but should test more.
//
// TODO(thepudds): could also possibly allow 's := append([]T(nil), ...)'
// and 's := append([]T{}, ...)'.
case ir.OMAKESLICE, ir.OSLICELIT:
name := dst.(*ir.Name)
if name.Class == ir.PAUTO {
if base.Debug.EscapeAlias > 1 {
aa.diag(assign.Pos(), assign, "assignment from make or slice literal", "")
}
// If this is Def=true, the ODCL in the init will cause this to be tracked
// as a candidate slice. We walk the init and RHS but avoid visiting the name
// in the LHS, which would remove the slice from the candidate list after it
// was just added.
aa.visit(src)
aa.countProcessed(name)
continue
}
// Check for s = append(s, <...>).
case ir.OAPPEND:
s := dst.(*ir.Name)
call := src.(*ir.CallExpr)
if call.Args[0] == s {
// Matches s = append(s, <...>).
// First visit other arguments in case they use s.
aa.visitList(call.Args[1:])
// Mark the call as processed, and s twice.
aa.countProcessed(s, call, s)
// We have now examined all non-ONAME children of assign.
// This is now the heart of the analysis.
// Check to see if this slice is a live candidate.
cs, ok := aa.candidateSlices[s]
if ok {
if cs.loopDepth == len(aa.loops) {
// No new loop has started after the declaration of s,
// so this is definitive.
if base.Debug.EscapeAlias >= 2 {
aa.diag(assign.Pos(), assign, "proved non-aliased append",
"(loop depth: %d, equals decl depth)", len(aa.loops))
}
aa.noAliasAppends = append(aa.noAliasAppends, call)
} else if cs.loopDepth < len(aa.loops) {
// A new loop has started since the declaration of s,
// so we can't validate this append yet, but
// remember it in case we can validate it later when
// all loops using s are done.
aa.loops[cs.loopDepth] = append(aa.loops[cs.loopDepth],
candidateAppend{s: s, call: call})
} else {
panic("impossible: candidate slice loopDepth > current loop depth")
}
}
continue
}
} // End of switch on src.Op().
// Reached bottom of the loop over assignments.
// If we get here, we need to visit the dst and src normally.
aa.visit(dst)
aa.visit(src)
}
}
func (aa *aliasAnalysis) countProcessed(nodes ...ir.Node) {
if aa.doubleCheck {
for _, n := range nodes {
aa.processed[n]++
}
}
}
func (aa *aliasAnalysis) diag(pos src.XPos, n ir.Node, what string, format string, args ...any) {
fmt.Printf("%-25s alias analysis: %-30s %-20s %s\n",
fmtPosShort(pos)+":",
what+":",
fmt.Sprintf("%v", n),
fmt.Sprintf(format, args...))
}
// doubleCheckProcessed does a sanity check for missed nodes in our visit.
func doubleCheckProcessed(fn *ir.Func, processed map[ir.Node]int) {
// Do a trivial walk while counting the nodes
// to compare against the counts in processed.
observed := make(map[ir.Node]int)
var walk func(n ir.Node) bool
walk = func(n ir.Node) bool {
observed[n]++
return ir.DoChildren(n, walk)
}
ir.DoChildren(fn, walk)
if !maps.Equal(processed, observed) {
// The most likely mistake might be something was missed while building processed,
// so print extra details in that direction.
for n, observedCount := range observed {
processedCount, ok := processed[n]
if processedCount != observedCount || !ok {
base.WarnfAt(n.Pos(),
"alias analysis: mismatch for %T: %v: processed %d times, observed %d times",
n, n, processedCount, observedCount)
}
}
base.FatalfAt(fn.Pos(), "alias analysis: mismatch in visited nodes")
}
}
func fmtPosShort(xpos src.XPos) string {
// TODO(thepudds): I think I did this a simpler way a while ago? Or maybe add base.FmtPosShort
// or similar? Or maybe just use base.FmtPos and give up on nicely aligned log messages?
pos := base.Ctxt.PosTable.Pos(xpos)
shortLine := filepath.Base(pos.AbsFilename()) + ":" + pos.LineNumber()
return shortLine
}
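For reference, a hedged example of the append pattern the analysis above is built to prove non-aliased; it matches the first example in the doc comment. The -gcflags/-d spelling in the comment assumes the usual lowercase mapping of the EscapeAlias debug field and is not confirmed here, and the freeing itself is additionally gated behind the freegc experiment and FreeAppend.

package main

import "fmt"

// grow matches the pattern described above: s is declared locally, is only
// ever updated via s = append(s, ...), and nothing aliases it before the
// loop completes, so each append is a candidate for AppendNoAlias.
// Building with something like -gcflags=-d=escapealias=2 should print the
// per-append diagnostics, assuming that flag spelling (an assumption here).
func grow(input []int) []int {
    var s []int
    for _, v := range input {
        s = append(s, v*v)
    }
    return s
}

func main() {
    fmt.Println(grow([]int{1, 2, 3})) // [1 4 9]
}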


@ -8,6 +8,7 @@ import (
"fmt" "fmt"
"go/constant" "go/constant"
"go/token" "go/token"
"internal/goexperiment"
"cmd/compile/internal/base" "cmd/compile/internal/base"
"cmd/compile/internal/ir" "cmd/compile/internal/ir"
@ -369,6 +370,16 @@ func (b *batch) finish(fns []*ir.Func) {
}
}
}
if goexperiment.RuntimeFreegc {
// Look for specific patterns of usage, such as appends
// to slices that we can prove are not aliased.
for _, fn := range fns {
a := aliasAnalysis{}
a.analyze(fn)
}
}
}
// inMutualBatch reports whether function fn is in the batch of


@ -83,10 +83,13 @@ func Main(archInit func(*ssagen.ArchInfo)) {
base.DebugSSA = ssa.PhaseOption
base.ParseFlags()
if os.Getenv("GOGC") == "" { // GOGC set disables starting heap adjustment
// More processors will use more heap, but assume that more memory is available.
// So 1 processor -> 40MB, 4 -> 64MB, 12 -> 128MB
base.AdjustStartingHeap(uint64(32+8*base.Flag.LowerC) << 20)
if flagGCStart := base.Debug.GCStart; flagGCStart > 0 || // explicit flags overrides environment variable disable of GC boost
os.Getenv("GOGC") == "" && os.Getenv("GOMEMLIMIT") == "" && base.Flag.LowerC != 1 { // explicit GC knobs or no concurrency implies default heap
startHeapMB := int64(128)
if flagGCStart > 0 {
startHeapMB = int64(flagGCStart)
}
base.AdjustStartingHeap(uint64(startHeapMB)<<20, 0, 0, 0, base.Debug.GCAdjust == 1)
}
types.LocalPkg = types.NewPkg(base.Ctxt.Pkgpath, "")


@ -20,6 +20,13 @@ import (
// DevirtualizeAndInlinePackage interleaves devirtualization and inlining on
// all functions within pkg.
func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) {
if base.Flag.W > 1 {
for _, fn := range typecheck.Target.Funcs {
s := fmt.Sprintf("\nbefore devirtualize-and-inline %v", fn.Sym())
ir.DumpList(s, fn.Body)
}
}
if profile != nil && base.Debug.PGODevirtualize > 0 { if profile != nil && base.Debug.PGODevirtualize > 0 {
// TODO(mdempsky): Integrate into DevirtualizeAndInlineFunc below. // TODO(mdempsky): Integrate into DevirtualizeAndInlineFunc below.
ir.VisitFuncsBottomUp(typecheck.Target.Funcs, func(list []*ir.Func, recursive bool) { ir.VisitFuncsBottomUp(typecheck.Target.Funcs, func(list []*ir.Func, recursive bool) {


@@ -193,6 +193,7 @@ type CallExpr struct {
GoDefer bool // whether this call is part of a go or defer statement
NoInline bool // whether this call must not be inlined
UseBuf bool // use stack buffer for backing store (OAPPEND only)
+ AppendNoAlias bool // backing store proven to be unaliased (OAPPEND only)
// whether it's a runtime.KeepAlive call the compiler generates to
// keep a variable alive. See #73137.
IsCompilerVarLive bool


@@ -1188,6 +1188,9 @@ func (n *MoveToHeapExpr) doChildren(do func(Node) bool) bool {
if n.Slice != nil && do(n.Slice) {
return true
}
+ if n.RType != nil && do(n.RType) {
+ return true
+ }
return false
}
func (n *MoveToHeapExpr) doChildrenWithHidden(do func(Node) bool) bool {
@@ -1198,6 +1201,9 @@ func (n *MoveToHeapExpr) editChildren(edit func(Node) Node) {
if n.Slice != nil {
n.Slice = edit(n.Slice).(Node)
}
+ if n.RType != nil {
+ n.RType = edit(n.RType).(Node)
+ }
}
func (n *MoveToHeapExpr) editChildrenWithHidden(edit func(Node) Node) {
n.editChildren(edit)


@@ -30,6 +30,8 @@ type symsStruct struct {
Goschedguarded *obj.LSym
Growslice *obj.LSym
GrowsliceBuf *obj.LSym
+ GrowsliceBufNoAlias *obj.LSym
+ GrowsliceNoAlias *obj.LSym
MoveSlice *obj.LSym
MoveSliceNoScan *obj.LSym
MoveSliceNoCap *obj.LSym
@@ -40,6 +42,7 @@ type symsStruct struct {
MallocGCSmallScanNoHeader [27]*obj.LSym
MallocGCTiny [16]*obj.LSym
Memmove *obj.LSym
+ Memequal *obj.LSym
Msanread *obj.LSym
Msanwrite *obj.LSym
Msanmove *obj.LSym


@@ -152,6 +152,11 @@ func analyze(fn *ir.Func) {
// least weight 2. (Note: appends in loops have weight >= 2.)
appendWeight int
+ // Loop depth at declaration point.
+ // Use for heuristics only, it is not guaranteed to be correct
+ // in the presence of gotos.
+ declDepth int
// Whether we ever do cap(s), or other operations that use cap(s)
// (possibly implicitly), like s[i:j].
capUsed bool
@@ -209,6 +214,20 @@ func analyze(fn *ir.Func) {
i.s.Opt = nil
return
}
+ if loopDepth > i.declDepth {
+ // Conservatively, we disable this optimization when the
+ // transition is inside a loop. This can result in adding
+ // overhead unnecessarily in cases like:
+ // func f(n int, p *[]byte) {
+ // var s []byte
+ // for i := range n {
+ // *p = s
+ // s = append(s, 0)
+ // }
+ // }
+ i.s.Opt = nil
+ return
+ }
i.transition = loc
}
@@ -237,7 +256,7 @@ func analyze(fn *ir.Func) {
// s = append(s, ...) is ok
i.okUses += 2
i.appends = append(i.appends, y)
- i.appendWeight += 1 + loopDepth
+ i.appendWeight += 1 + (loopDepth - i.declDepth)
}
// TODO: s = append(nil, ...)?
}
@@ -277,6 +296,7 @@ func analyze(fn *ir.Func) {
n := n.(*ir.Decl)
if i := tracking(n.X); i != nil {
i.okUses++
i.declDepth = loopDepth
}
case ir.OINDEX:
n := n.(*ir.IndexExpr)
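For intuition about the new declDepth field, a hedged sketch of how it feeds the weight heuristic (only the arithmetic shown in the hunk above is assumed):

package p

func h(n int) []byte {
	var s []byte // declared at loop depth 0, so declDepth is 0
	for i := 0; i < n; i++ {
		s = append(s, byte(i)) // loopDepth 1: contributes weight 1 + (1 - 0) = 2
	}
	return s
}

// If this whole body were nested one loop deeper, loopDepth and declDepth would both
// shift by one and the relative weight would stay the same, which appears to be the
// point of subtracting declDepth rather than using loopDepth alone.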


@@ -481,6 +481,7 @@
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)
+ (MemEq ...) => (LoweredMemEq ...)
// Absorb pseudo-ops into blocks.
(If (Equal cc) yes no) => (EQ cc yes no)


@@ -535,6 +535,7 @@ func init() {
// pseudo-ops
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem.
+ {name: "LoweredMemEq", argLength: 4, reg: regInfo{inputs: []regMask{buildReg("R0"), buildReg("R1"), buildReg("R2")}, outputs: []regMask{buildReg("R0")}, clobbers: callerSave}, typ: "Bool", faultOnNilArg0: true, faultOnNilArg1: true, clobberFlags: true, call: true}, // arg0, arg1 - pointers to memory, arg2=size, arg3=mem.
{name: "Equal", argLength: 1, reg: readflags}, // bool, true flags encode x==y false otherwise.
{name: "NotEqual", argLength: 1, reg: readflags}, // bool, true flags encode x!=y false otherwise.


@@ -1525,6 +1525,41 @@
&& isSamePtr(p, q)
=> (MakeResult (ConstBool <typ.Bool> [true]) mem)
+ (MemEq sptr tptr (Const64 [1]) mem)
+ => (Eq8 (Load <typ.Int8> sptr mem) (Load <typ.Int8> tptr mem))
+ (Load <typ.Int8> sptr:(Addr {scon} (SB)) mem)
+ && symIsRO(scon)
+ => (Const8 <typ.Int8> [int8(read8(scon,0))])
+ (MemEq sptr tptr (Const64 [2]) mem)
+ && canLoadUnaligned(config)
+ => (Eq16 (Load <typ.Int16> sptr mem) (Load <typ.Int16> tptr mem))
+ (Load <typ.Int16> sptr:(Addr {scon} (SB)) mem)
+ && symIsRO(scon)
+ => (Const16 <typ.Int16> [int16(read16(scon,0,config.ctxt.Arch.ByteOrder))])
+ (MemEq sptr tptr (Const64 [4]) mem)
+ && canLoadUnaligned(config)
+ => (Eq32 (Load <typ.Int32> sptr mem) (Load <typ.Int32> tptr mem))
+ (Load <typ.Int32> sptr:(Addr {scon} (SB)) mem)
+ && symIsRO(scon)
+ => (Const32 <typ.Int32> [int32(read32(scon,0,config.ctxt.Arch.ByteOrder))])
+ (MemEq sptr tptr (Const64 [8]) mem)
+ && canLoadUnaligned(config) && config.PtrSize == 8
+ => (Eq64 (Load <typ.Int64> sptr mem) (Load <typ.Int64> tptr mem))
+ (Load <typ.Int64> sptr:(Addr {scon} (SB)) mem)
+ && symIsRO(scon)
+ => (Const64 <typ.Int64> [int64(read64(scon,0,config.ctxt.Arch.ByteOrder))])
+ (MemEq _ _ (Const64 [0]) _) => (ConstBool <typ.Bool> [true])
+ (MemEq p q _ _) && isSamePtr(p, q) => (ConstBool <typ.Bool> [true])
// Turn known-size calls to memclrNoHeapPointers into a Zero.
// Note that we are using types.Types[types.TUINT8] instead of sptr.Type.Elem() - see issue 55122 and CL 431496 for more details.
(SelectN [0] call:(StaticCall {sym} sptr (Const(64|32) [c]) mem))
@@ -2019,8 +2054,13 @@
// See issue 56440.
// Note there are 2 rules here, one for the pre-decomposed []T result and one for
// the post-decomposed (*T,int,int) result. (The latter is generated after call expansion.)
- (SliceLen (SelectN [0] (StaticLECall {sym} _ newLen:(Const(64|32)) _ _ _ _))) && isSameCall(sym, "runtime.growslice") => newLen
- (SelectN [1] (StaticCall {sym} _ newLen:(Const(64|32)) _ _ _ _)) && v.Type.IsInteger() && isSameCall(sym, "runtime.growslice") => newLen
+ // TODO(thepudds): we probably need the new growsliceBuf and growsliceBufNoAlias here as well?
+ (SliceLen (SelectN [0] (StaticLECall {sym} _ newLen:(Const(64|32)) _ _ _ _)))
+ && (isSameCall(sym, "runtime.growslice") || isSameCall(sym, "runtime.growsliceNoAlias"))
+ => newLen
+ (SelectN [1] (StaticCall {sym} _ newLen:(Const(64|32)) _ _ _ _)) && v.Type.IsInteger()
+ && (isSameCall(sym, "runtime.growslice") || isSameCall(sym, "runtime.growsliceNoAlias"))
+ => newLen
// Collapse moving A -> B -> C into just A -> C.
// Later passes (deadstore, elim unread auto) will remove the A -> B move, if possible.


@@ -679,6 +679,9 @@ var genericOps = []opData{
{name: "PrefetchCache", argLength: 2, hasSideEffects: true}, // Do prefetch arg0 to cache. arg0=addr, arg1=memory.
{name: "PrefetchCacheStreamed", argLength: 2, hasSideEffects: true}, // Do non-temporal or streamed prefetch arg0 to cache. arg0=addr, arg1=memory.
+ // Helper instruction which is semantically equivalent to calling runtime.memequal, but some targets may prefer to custom lower it later, e.g. for specific constant sizes.
+ {name: "MemEq", argLength: 4, commutative: true, typ: "Bool"}, // arg0=ptr0, arg1=ptr1, arg2=size, arg3=memory.
// SIMD
{name: "ZeroSIMD", argLength: 0}, // zero value of a vector


@ -4409,6 +4409,7 @@ const (
OpARM64CALLclosure OpARM64CALLclosure
OpARM64CALLinter OpARM64CALLinter
OpARM64LoweredNilCheck OpARM64LoweredNilCheck
OpARM64LoweredMemEq
OpARM64Equal OpARM64Equal
OpARM64NotEqual OpARM64NotEqual
OpARM64LessThan OpARM64LessThan
@ -6123,6 +6124,7 @@ const (
OpClobberReg OpClobberReg
OpPrefetchCache OpPrefetchCache
OpPrefetchCacheStreamed OpPrefetchCacheStreamed
OpMemEq
OpZeroSIMD OpZeroSIMD
OpCvt16toMask8x16 OpCvt16toMask8x16
OpCvt32toMask8x32 OpCvt32toMask8x32
@ -68796,6 +68798,25 @@ var opcodeTable = [...]opInfo{
}, },
}, },
}, },
{
name: "LoweredMemEq",
argLen: 4,
clobberFlags: true,
call: true,
faultOnNilArg0: true,
faultOnNilArg1: true,
reg: regInfo{
inputs: []inputInfo{
{0, 1}, // R0
{1, 2}, // R1
{2, 4}, // R2
},
clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
outputs: []outputInfo{
{0, 1}, // R0
},
},
},
{ {
name: "Equal", name: "Equal",
argLen: 1, argLen: 1,
@ -88793,6 +88814,12 @@ var opcodeTable = [...]opInfo{
hasSideEffects: true, hasSideEffects: true,
generic: true, generic: true,
}, },
{
name: "MemEq",
argLen: 4,
commutative: true,
generic: true,
},
{ {
name: "ZeroSIMD", name: "ZeroSIMD",
argLen: 0, argLen: 0,


@@ -250,9 +250,51 @@ func fitsInBitsU(x uint64, b uint) bool {
return x>>b == 0
}
+ func noLimitForBitsize(bitsize uint) limit {
+ return limit{min: -(1 << (bitsize - 1)), max: 1<<(bitsize-1) - 1, umin: 0, umax: 1<<bitsize - 1}
+ }
+ func convertIntWithBitsize[Target uint64 | int64, Source uint64 | int64](x Source, bitsize uint) Target {
+ switch bitsize {
+ case 64:
+ return Target(x)
+ case 32:
+ return Target(int32(x))
+ case 16:
+ return Target(int16(x))
+ case 8:
+ return Target(int8(x))
+ default:
+ panic("unreachable")
+ }
+ }
// add returns the limit obtained by adding a value with limit l
// to a value with limit l2. The result must fit in b bits.
func (l limit) add(l2 limit, b uint) limit {
+ var isLConst, isL2Const bool
+ var lConst, l2Const uint64
+ if l.min == l.max {
+ isLConst = true
+ lConst = convertIntWithBitsize[uint64](l.min, b)
+ } else if l.umin == l.umax {
+ isLConst = true
+ lConst = l.umin
+ }
+ if l2.min == l2.max {
+ isL2Const = true
+ l2Const = convertIntWithBitsize[uint64](l2.min, b)
+ } else if l2.umin == l2.umax {
+ isL2Const = true
+ l2Const = l2.umin
+ }
+ if isLConst && isL2Const {
+ r := lConst + l2Const
+ r &= (uint64(1) << b) - 1
+ int64r := convertIntWithBitsize[int64](r, b)
+ return limit{min: int64r, max: int64r, umin: r, umax: r}
+ }
r := noLimit
min, minOk := safeAdd(l.min, l2.min, b)
max, maxOk := safeAdd(l.max, l2.max, b)
@@ -357,6 +399,11 @@ func (l limit) com(b uint) limit {
}
}
+ // Similar to add, but computes the negation of the limit for bitsize b.
+ func (l limit) neg(b uint) limit {
+ return l.com(b).add(limit{min: 1, max: 1, umin: 1, umax: 1}, b)
+ }
var noLimit = limit{math.MinInt64, math.MaxInt64, 0, math.MaxUint64}
// a limitFact is a limit known for a particular value.
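The new neg helper leans on the two's-complement identity -x == ^x + 1, so limit negation reuses the existing com (bitwise complement) and add logic. A quick sanity check of the identity outside the compiler:

package main

import "fmt"

func main() {
	for _, x := range []int8{0, 1, -1, 127, -128} {
		// -x and ^x+1 agree for every int8, including the wraparound case x == -128,
		// which is exactly what the 8-bit limit arithmetic models.
		fmt.Println(-x == ^x+1) // always true
	}
}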
@@ -1753,8 +1800,7 @@ func initLimit(v *Value) limit {
}
// Default limits based on type.
- bitsize := v.Type.Size() * 8
- lim := limit{min: -(1 << (bitsize - 1)), max: 1<<(bitsize-1) - 1, umin: 0, umax: 1<<bitsize - 1}
+ lim := noLimitForBitsize(uint(v.Type.Size()) * 8)
// Tighter limits on some opcodes.
switch v.Op {
@@ -1949,7 +1995,7 @@ func (ft *factsTable) flowLimit(v *Value) {
case OpNeg64, OpNeg32, OpNeg16, OpNeg8:
a := ft.limits[v.Args[0].ID]
bitsize := uint(v.Type.Size()) * 8
- ft.newLimit(v, a.com(bitsize).add(limit{min: 1, max: 1, umin: 1, umax: 1}, bitsize))
+ ft.newLimit(v, a.neg(bitsize))
case OpMul64, OpMul32, OpMul16, OpMul8:
a := ft.limits[v.Args[0].ID]
b := ft.limits[v.Args[1].ID]
@@ -2040,14 +2086,14 @@ func (ft *factsTable) flowLimit(v *Value) {
//
// slicecap - index >= slicelen - index >= K
//
- // Note that "index" is not useed for indexing in this pattern, but
+ // Note that "index" is not used for indexing in this pattern, but
// in the motivating example (chunked slice iteration) it is.
func (ft *factsTable) detectSliceLenRelation(v *Value) {
if v.Op != OpSub64 {
return
}
- if !(v.Args[0].Op == OpSliceLen || v.Args[0].Op == OpSliceCap) {
+ if !(v.Args[0].Op == OpSliceLen || v.Args[0].Op == OpStringLen || v.Args[0].Op == OpSliceCap) {
return
}
@@ -2070,9 +2116,9 @@ func (ft *factsTable) detectSliceLenRelation(v *Value) {
continue
}
var lenOffset *Value
- if bound := ow.Args[0]; bound.Op == OpSliceLen && bound.Args[0] == slice {
+ if bound := ow.Args[0]; (bound.Op == OpSliceLen || bound.Op == OpStringLen) && bound.Args[0] == slice {
lenOffset = ow.Args[1]
- } else if bound := ow.Args[1]; bound.Op == OpSliceLen && bound.Args[0] == slice {
+ } else if bound := ow.Args[1]; (bound.Op == OpSliceLen || bound.Op == OpStringLen) && bound.Args[0] == slice {
lenOffset = ow.Args[0]
}
if lenOffset == nil || lenOffset.Op != OpConst64 {
@@ -2332,7 +2378,7 @@ func unsignedSubUnderflows(a, b uint64) bool {
// iteration where the index is not directly compared to the length.
// if isReslice, then delta can be equal to K.
func checkForChunkedIndexBounds(ft *factsTable, b *Block, index, bound *Value, isReslice bool) bool {
- if bound.Op != OpSliceLen && bound.Op != OpSliceCap {
+ if bound.Op != OpSliceLen && bound.Op != OpStringLen && bound.Op != OpSliceCap {
return false
}
@@ -2367,9 +2413,9 @@ func checkForChunkedIndexBounds(ft *factsTable, b *Block, index, bound *Value, i
}
if ow := o.w; ow.Op == OpAdd64 {
var lenOffset *Value
- if bound := ow.Args[0]; bound.Op == OpSliceLen && bound.Args[0] == slice {
+ if bound := ow.Args[0]; (bound.Op == OpSliceLen || bound.Op == OpStringLen) && bound.Args[0] == slice {
lenOffset = ow.Args[1]
- } else if bound := ow.Args[1]; bound.Op == OpSliceLen && bound.Args[0] == slice {
+ } else if bound := ow.Args[1]; (bound.Op == OpSliceLen || bound.Op == OpStringLen) && bound.Args[0] == slice {
lenOffset = ow.Args[0]
}
if lenOffset == nil || lenOffset.Op != OpConst64 {
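The OpStringLen additions extend the existing chunked-iteration pattern from slices to strings. A hedged sketch of the kind of loop this is aimed at; whether a bounds check is actually eliminated here depends on the rest of the prove pass:

package p

// The in-loop fact len(s)-i >= 4 is the relation detectSliceLenRelation and
// checkForChunkedIndexBounds now also track for strings, which can let prove drop
// bounds checks on s[i] through s[i+3].
func sum4(s string) (total int) {
	for i := 0; i+4 <= len(s); i += 4 {
		total += int(s[i]) + int(s[i+1]) + int(s[i+2]) + int(s[i+3])
	}
	return total
}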


@ -0,0 +1,76 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssa
import (
"math"
"testing"
)
func testLimitUnaryOpSigned8(t *testing.T, opName string, op func(l limit, bitsize uint) limit, opImpl func(int8) int8) {
sizeLimit := noLimitForBitsize(8)
for min := math.MinInt8; min <= math.MaxInt8; min++ {
for max := min; max <= math.MaxInt8; max++ {
realSmallest, realBiggest := int8(math.MaxInt8), int8(math.MinInt8)
for i := min; i <= max; i++ {
result := opImpl(int8(i))
if result < realSmallest {
realSmallest = result
}
if result > realBiggest {
realBiggest = result
}
}
l := limit{int64(min), int64(max), 0, math.MaxUint64}
l = op(l, 8)
l = l.intersect(sizeLimit) // We assume this is gonna be used by newLimit which is seeded by the op size already.
if l.min != int64(realSmallest) || l.max != int64(realBiggest) {
t.Errorf("%s(%d..%d) = %d..%d; want %d..%d", opName, min, max, l.min, l.max, realSmallest, realBiggest)
}
}
}
}
func testLimitUnaryOpUnsigned8(t *testing.T, opName string, op func(l limit, bitsize uint) limit, opImpl func(uint8) uint8) {
sizeLimit := noLimitForBitsize(8)
for min := 0; min <= math.MaxUint8; min++ {
for max := min; max <= math.MaxUint8; max++ {
realSmallest, realBiggest := uint8(math.MaxUint8), uint8(0)
for i := min; i <= max; i++ {
result := opImpl(uint8(i))
if result < realSmallest {
realSmallest = result
}
if result > realBiggest {
realBiggest = result
}
}
l := limit{math.MinInt64, math.MaxInt64, uint64(min), uint64(max)}
l = op(l, 8)
l = l.intersect(sizeLimit) // We assume this is gonna be used by newLimit which is seeded by the op size already.
if l.umin != uint64(realSmallest) || l.umax != uint64(realBiggest) {
t.Errorf("%s(%d..%d) = %d..%d; want %d..%d", opName, min, max, l.umin, l.umax, realSmallest, realBiggest)
}
}
}
}
func TestLimitNegSigned(t *testing.T) {
testLimitUnaryOpSigned8(t, "neg", limit.neg, func(x int8) int8 { return -x })
}
func TestLimitNegUnsigned(t *testing.T) {
testLimitUnaryOpUnsigned8(t, "neg", limit.neg, func(x uint8) uint8 { return -x })
}
func TestLimitComSigned(t *testing.T) {
testLimitUnaryOpSigned8(t, "com", limit.com, func(x int8) int8 { return ^x })
}
func TestLimitComUnsigned(t *testing.T) {
testLimitUnaryOpUnsigned8(t, "com", limit.com, func(x uint8) uint8 { return ^x })
}
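For intuition about what the exhaustive 8-bit tests above verify, a small hedged example worked from the definitions of com and neg (nothing here beyond ordinary int8 arithmetic):

package main

import "fmt"

func main() {
	// For 8-bit values in the signed range [3, 5]:
	//   com (bitwise ^) maps the range to [^5, ^3] = [-6, -4]
	//   neg (com plus 1)  maps the range to [-5, -3]
	for x := int8(3); x <= 5; x++ {
		fmt.Println(^x, -x)
	}
}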


@@ -897,7 +897,15 @@ func (s *regAllocState) dropIfUnused(v *Value) {
}
vi := &s.values[v.ID]
r := vi.uses
- if r == nil || (!opcodeTable[v.Op].fixedReg && r.dist > s.nextCall[s.curIdx]) {
+ nextCall := s.nextCall[s.curIdx]
+ if opcodeTable[v.Op].call {
+ if s.curIdx == len(s.nextCall)-1 {
+ nextCall = math.MaxInt32
+ } else {
+ nextCall = s.nextCall[s.curIdx+1]
+ }
+ }
+ if r == nil || (!opcodeTable[v.Op].fixedReg && r.dist > nextCall) {
s.freeRegs(vi.regs)
}
}
@@ -1036,8 +1044,11 @@ func (s *regAllocState) regalloc(f *Func) {
regValLiveSet.add(v.ID)
}
}
- if len(s.nextCall) < len(b.Values) {
- s.nextCall = append(s.nextCall, make([]int32, len(b.Values)-len(s.nextCall))...)
+ if cap(s.nextCall) < len(b.Values) {
+ c := cap(s.nextCall)
+ s.nextCall = append(s.nextCall[:c], make([]int32, len(b.Values)-c)...)
+ } else {
+ s.nextCall = s.nextCall[:len(b.Values)]
}
var nextCall int32 = math.MaxInt32
for i := len(b.Values) - 1; i >= 0; i-- {


@@ -840,6 +840,9 @@ func rewriteValueARM64(v *Value) bool {
case OpMax64F:
v.Op = OpARM64FMAXD
return true
+ case OpMemEq:
+ v.Op = OpARM64LoweredMemEq
+ return true
case OpMin32F:
v.Op = OpARM64FMINS
return true


@@ -224,6 +224,8 @@ func rewriteValuegeneric(v *Value) bool {
return rewriteValuegeneric_OpLsh8x64(v)
case OpLsh8x8:
return rewriteValuegeneric_OpLsh8x8(v)
+ case OpMemEq:
+ return rewriteValuegeneric_OpMemEq(v)
case OpMod16:
return rewriteValuegeneric_OpMod16(v)
case OpMod16u:
@@ -11869,6 +11871,8 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
// match: (Load <t1> p1 (Store {t2} p2 x _))
// cond: isSamePtr(p1, p2) && copyCompatibleType(t1, x.Type) && t1.Size() == t2.Size()
// result: x
@@ -12453,6 +12457,102 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
v.AddArg(v0)
return true
}
// match: (Load <typ.Int8> sptr:(Addr {scon} (SB)) mem)
// cond: symIsRO(scon)
// result: (Const8 <typ.Int8> [int8(read8(scon,0))])
for {
if v.Type != typ.Int8 {
break
}
sptr := v_0
if sptr.Op != OpAddr {
break
}
scon := auxToSym(sptr.Aux)
sptr_0 := sptr.Args[0]
if sptr_0.Op != OpSB {
break
}
if !(symIsRO(scon)) {
break
}
v.reset(OpConst8)
v.Type = typ.Int8
v.AuxInt = int8ToAuxInt(int8(read8(scon, 0)))
return true
}
// match: (Load <typ.Int16> sptr:(Addr {scon} (SB)) mem)
// cond: symIsRO(scon)
// result: (Const16 <typ.Int16> [int16(read16(scon,0,config.ctxt.Arch.ByteOrder))])
for {
if v.Type != typ.Int16 {
break
}
sptr := v_0
if sptr.Op != OpAddr {
break
}
scon := auxToSym(sptr.Aux)
sptr_0 := sptr.Args[0]
if sptr_0.Op != OpSB {
break
}
if !(symIsRO(scon)) {
break
}
v.reset(OpConst16)
v.Type = typ.Int16
v.AuxInt = int16ToAuxInt(int16(read16(scon, 0, config.ctxt.Arch.ByteOrder)))
return true
}
// match: (Load <typ.Int32> sptr:(Addr {scon} (SB)) mem)
// cond: symIsRO(scon)
// result: (Const32 <typ.Int32> [int32(read32(scon,0,config.ctxt.Arch.ByteOrder))])
for {
if v.Type != typ.Int32 {
break
}
sptr := v_0
if sptr.Op != OpAddr {
break
}
scon := auxToSym(sptr.Aux)
sptr_0 := sptr.Args[0]
if sptr_0.Op != OpSB {
break
}
if !(symIsRO(scon)) {
break
}
v.reset(OpConst32)
v.Type = typ.Int32
v.AuxInt = int32ToAuxInt(int32(read32(scon, 0, config.ctxt.Arch.ByteOrder)))
return true
}
// match: (Load <typ.Int64> sptr:(Addr {scon} (SB)) mem)
// cond: symIsRO(scon)
// result: (Const64 <typ.Int64> [int64(read64(scon,0,config.ctxt.Arch.ByteOrder))])
for {
if v.Type != typ.Int64 {
break
}
sptr := v_0
if sptr.Op != OpAddr {
break
}
scon := auxToSym(sptr.Aux)
sptr_0 := sptr.Args[0]
if sptr_0.Op != OpSB {
break
}
if !(symIsRO(scon)) {
break
}
v.reset(OpConst64)
v.Type = typ.Int64
v.AuxInt = int64ToAuxInt(int64(read64(scon, 0, config.ctxt.Arch.ByteOrder)))
return true
}
// match: (Load (Addr {s} sb) _)
// cond: isFixedLoad(v, s, 0)
// result: rewriteFixedLoad(v, s, sb, 0)
@@ -14767,6 +14867,124 @@ func rewriteValuegeneric_OpLsh8x8(v *Value) bool {
}
return false
}
func rewriteValuegeneric_OpMemEq(v *Value) bool {
v_3 := v.Args[3]
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
typ := &b.Func.Config.Types
// match: (MemEq sptr tptr (Const64 [1]) mem)
// result: (Eq8 (Load <typ.Int8> sptr mem) (Load <typ.Int8> tptr mem))
for {
sptr := v_0
tptr := v_1
if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 1 {
break
}
mem := v_3
v.reset(OpEq8)
v0 := b.NewValue0(v.Pos, OpLoad, typ.Int8)
v0.AddArg2(sptr, mem)
v1 := b.NewValue0(v.Pos, OpLoad, typ.Int8)
v1.AddArg2(tptr, mem)
v.AddArg2(v0, v1)
return true
}
// match: (MemEq sptr tptr (Const64 [2]) mem)
// cond: canLoadUnaligned(config)
// result: (Eq16 (Load <typ.Int16> sptr mem) (Load <typ.Int16> tptr mem))
for {
sptr := v_0
tptr := v_1
if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 2 {
break
}
mem := v_3
if !(canLoadUnaligned(config)) {
break
}
v.reset(OpEq16)
v0 := b.NewValue0(v.Pos, OpLoad, typ.Int16)
v0.AddArg2(sptr, mem)
v1 := b.NewValue0(v.Pos, OpLoad, typ.Int16)
v1.AddArg2(tptr, mem)
v.AddArg2(v0, v1)
return true
}
// match: (MemEq sptr tptr (Const64 [4]) mem)
// cond: canLoadUnaligned(config)
// result: (Eq32 (Load <typ.Int32> sptr mem) (Load <typ.Int32> tptr mem))
for {
sptr := v_0
tptr := v_1
if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 4 {
break
}
mem := v_3
if !(canLoadUnaligned(config)) {
break
}
v.reset(OpEq32)
v0 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
v0.AddArg2(sptr, mem)
v1 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
v1.AddArg2(tptr, mem)
v.AddArg2(v0, v1)
return true
}
// match: (MemEq sptr tptr (Const64 [8]) mem)
// cond: canLoadUnaligned(config) && config.PtrSize == 8
// result: (Eq64 (Load <typ.Int64> sptr mem) (Load <typ.Int64> tptr mem))
for {
sptr := v_0
tptr := v_1
if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 8 {
break
}
mem := v_3
if !(canLoadUnaligned(config) && config.PtrSize == 8) {
break
}
v.reset(OpEq64)
v0 := b.NewValue0(v.Pos, OpLoad, typ.Int64)
v0.AddArg2(sptr, mem)
v1 := b.NewValue0(v.Pos, OpLoad, typ.Int64)
v1.AddArg2(tptr, mem)
v.AddArg2(v0, v1)
return true
}
// match: (MemEq _ _ (Const64 [0]) _)
// result: (ConstBool <typ.Bool> [true])
for {
if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 0 {
break
}
v.reset(OpConstBool)
v.Type = typ.Bool
v.AuxInt = boolToAuxInt(true)
return true
}
// match: (MemEq p q _ _)
// cond: isSamePtr(p, q)
// result: (ConstBool <typ.Bool> [true])
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
p := v_0
q := v_1
if !(isSamePtr(p, q)) {
continue
}
v.reset(OpConstBool)
v.Type = typ.Bool
v.AuxInt = boolToAuxInt(true)
return true
}
break
}
return false
}
func rewriteValuegeneric_OpMod16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -29939,7 +30157,7 @@ func rewriteValuegeneric_OpSelectN(v *Value) bool {
return true
}
// match: (SelectN [1] (StaticCall {sym} _ newLen:(Const64) _ _ _ _))
- // cond: v.Type.IsInteger() && isSameCall(sym, "runtime.growslice")
+ // cond: v.Type.IsInteger() && (isSameCall(sym, "runtime.growslice") || isSameCall(sym, "runtime.growsliceNoAlias"))
// result: newLen
for {
if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStaticCall || len(v_0.Args) != 6 {
@@ -29948,14 +30166,14 @@ func rewriteValuegeneric_OpSelectN(v *Value) bool {
sym := auxToCall(v_0.Aux)
_ = v_0.Args[1]
newLen := v_0.Args[1]
- if newLen.Op != OpConst64 || !(v.Type.IsInteger() && isSameCall(sym, "runtime.growslice")) {
+ if newLen.Op != OpConst64 || !(v.Type.IsInteger() && (isSameCall(sym, "runtime.growslice") || isSameCall(sym, "runtime.growsliceNoAlias"))) {
break
}
v.copyOf(newLen)
return true
}
// match: (SelectN [1] (StaticCall {sym} _ newLen:(Const32) _ _ _ _))
- // cond: v.Type.IsInteger() && isSameCall(sym, "runtime.growslice")
+ // cond: v.Type.IsInteger() && (isSameCall(sym, "runtime.growslice") || isSameCall(sym, "runtime.growsliceNoAlias"))
// result: newLen
for {
if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStaticCall || len(v_0.Args) != 6 {
@@ -29964,7 +30182,7 @@ func rewriteValuegeneric_OpSelectN(v *Value) bool {
sym := auxToCall(v_0.Aux)
_ = v_0.Args[1]
newLen := v_0.Args[1]
- if newLen.Op != OpConst32 || !(v.Type.IsInteger() && isSameCall(sym, "runtime.growslice")) {
+ if newLen.Op != OpConst32 || !(v.Type.IsInteger() && (isSameCall(sym, "runtime.growslice") || isSameCall(sym, "runtime.growsliceNoAlias"))) {
break
}
v.copyOf(newLen)
@@ -30376,7 +30594,7 @@ func rewriteValuegeneric_OpSliceLen(v *Value) bool {
return true
}
// match: (SliceLen (SelectN [0] (StaticLECall {sym} _ newLen:(Const64) _ _ _ _)))
- // cond: isSameCall(sym, "runtime.growslice")
+ // cond: (isSameCall(sym, "runtime.growslice") || isSameCall(sym, "runtime.growsliceNoAlias"))
// result: newLen
for {
if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 {
@@ -30389,14 +30607,14 @@ func rewriteValuegeneric_OpSliceLen(v *Value) bool {
sym := auxToCall(v_0_0.Aux)
_ = v_0_0.Args[1]
newLen := v_0_0.Args[1]
- if newLen.Op != OpConst64 || !(isSameCall(sym, "runtime.growslice")) {
+ if newLen.Op != OpConst64 || !(isSameCall(sym, "runtime.growslice") || isSameCall(sym, "runtime.growsliceNoAlias")) {
break
}
v.copyOf(newLen)
return true
}
// match: (SliceLen (SelectN [0] (StaticLECall {sym} _ newLen:(Const32) _ _ _ _)))
- // cond: isSameCall(sym, "runtime.growslice")
+ // cond: (isSameCall(sym, "runtime.growslice") || isSameCall(sym, "runtime.growsliceNoAlias"))
// result: newLen
for {
if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 {
@@ -30409,7 +30627,7 @@ func rewriteValuegeneric_OpSliceLen(v *Value) bool {
sym := auxToCall(v_0_0.Aux)
_ = v_0_0.Args[1]
newLen := v_0_0.Args[1]
- if newLen.Op != OpConst32 || !(isSameCall(sym, "runtime.growslice")) {
+ if newLen.Op != OpConst32 || !(isSameCall(sym, "runtime.growslice") || isSameCall(sym, "runtime.growsliceNoAlias")) {
break
}
v.copyOf(newLen)


@@ -196,6 +196,12 @@ func initIntrinsics(cfg *intrinsicBuildConfig) {
},
sys.AMD64, sys.I386, sys.ARM64, sys.ARM, sys.Loong64, sys.S390X)
+ addF("runtime", "memequal",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue4(ssa.OpMemEq, s.f.Config.Types.Bool, args[0], args[1], args[2], s.mem())
+ },
+ sys.ARM64)
if cfg.goppc64 >= 10 {
// Use only on Power10 as the new byte reverse instructions that Power10 provide
// make it worthwhile as an intrinsic
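For context, runtime.memequal is the helper that compiler-generated equality code calls for raw byte comparisons (TestCompareMemequal later in this diff exercises it). With the intrinsic above, on arm64 such a call becomes an ssa.OpMemEq value rather than an ordinary call, so the generic.rules rewrites can apply. A hedged example of Go code whose == is expected to go through memequal:

package p

type record struct {
	name string
	raw  [100]byte // presumably compared as one contiguous memory block via memequal
	id   string
}

// The generated equality function for record compares name and id as strings and, it
// appears, the raw field via a runtime.memequal call on its 100 bytes; that call is
// what the new arm64 intrinsic maps to OpMemEq.
func same(x, y record) bool {
	return x == y
}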


@@ -327,6 +327,7 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{
{"arm64", "math/bits", "TrailingZeros64"}: struct{}{},
{"arm64", "math/bits", "TrailingZeros8"}: struct{}{},
{"arm64", "runtime", "KeepAlive"}: struct{}{},
+ {"arm64", "runtime", "memequal"}: struct{}{},
{"arm64", "runtime", "publicationBarrier"}: struct{}{},
{"arm64", "runtime", "slicebytetostringtmp"}: struct{}{},
{"arm64", "sync", "runtime_LoadAcquintptr"}: struct{}{},


@@ -12,6 +12,7 @@ import (
"go/constant"
"html"
"internal/buildcfg"
+ "internal/goexperiment"
"internal/runtime/gc"
"os"
"path/filepath"
@@ -125,6 +126,8 @@ func InitConfig() {
ir.Syms.Goschedguarded = typecheck.LookupRuntimeFunc("goschedguarded")
ir.Syms.Growslice = typecheck.LookupRuntimeFunc("growslice")
ir.Syms.GrowsliceBuf = typecheck.LookupRuntimeFunc("growsliceBuf")
+ ir.Syms.GrowsliceBufNoAlias = typecheck.LookupRuntimeFunc("growsliceBufNoAlias")
+ ir.Syms.GrowsliceNoAlias = typecheck.LookupRuntimeFunc("growsliceNoAlias")
ir.Syms.MoveSlice = typecheck.LookupRuntimeFunc("moveSlice")
ir.Syms.MoveSliceNoScan = typecheck.LookupRuntimeFunc("moveSliceNoScan")
ir.Syms.MoveSliceNoCap = typecheck.LookupRuntimeFunc("moveSliceNoCap")
@@ -141,6 +144,7 @@ func InitConfig() {
}
ir.Syms.MallocGC = typecheck.LookupRuntimeFunc("mallocgc")
ir.Syms.Memmove = typecheck.LookupRuntimeFunc("memmove")
+ ir.Syms.Memequal = typecheck.LookupRuntimeFunc("memequal")
ir.Syms.Msanread = typecheck.LookupRuntimeFunc("msanread")
ir.Syms.Msanwrite = typecheck.LookupRuntimeFunc("msanwrite")
ir.Syms.Msanmove = typecheck.LookupRuntimeFunc("msanmove")
@@ -4047,9 +4051,25 @@ func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value {
s.defvars[s.f.Entry.ID][memVar] = mem
info.usedStatic = true
}
- r = s.rtcall(ir.Syms.GrowsliceBuf, true, []*types.Type{n.Type()}, p, l, c, nargs, taddr, s.addr(info.store), s.constInt(types.Types[types.TINT], info.K))
+ fn := ir.Syms.GrowsliceBuf
+ if goexperiment.RuntimeFreegc && n.AppendNoAlias && !et.HasPointers() {
+ // The append is for a non-aliased slice where the runtime knows how to free
+ // the old logically dead backing store after growth.
+ // TODO(thepudds): for now, we only use the NoAlias version for element types
+ // without pointers while waiting on additional runtime support (CL 698515).
+ fn = ir.Syms.GrowsliceBufNoAlias
+ }
+ r = s.rtcall(fn, true, []*types.Type{n.Type()}, p, l, c, nargs, taddr, s.addr(info.store), s.constInt(types.Types[types.TINT], info.K))
} else {
- r = s.rtcall(ir.Syms.Growslice, true, []*types.Type{n.Type()}, p, l, c, nargs, taddr)
+ fn := ir.Syms.Growslice
+ if goexperiment.RuntimeFreegc && n.AppendNoAlias && !et.HasPointers() {
+ // The append is for a non-aliased slice where the runtime knows how to free
+ // the old logically dead backing store after growth.
+ // TODO(thepudds): for now, we only use the NoAlias version for element types
+ // without pointers while waiting on additional runtime support (CL 698515).
+ fn = ir.Syms.GrowsliceNoAlias
+ }
+ r = s.rtcall(fn, true, []*types.Type{n.Type()}, p, l, c, nargs, taddr)
}
// Decompose output slice
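Illustrative only: per the TODO above, the NoAlias growslice variants are currently selected only when the element type has no pointers.

package p

var a []int64  // element type without pointers: eligible for growsliceNoAlias / growsliceBufNoAlias
var b []*int64 // element type with pointers: keeps using plain growslice / growsliceBuf for now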


@ -0,0 +1,298 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Tests of generated equality functions.
package test
import (
"reflect"
"testing"
"unsafe"
)
//go:noinline
func checkEq(t *testing.T, x, y any) {
// Make sure we don't inline the equality test.
if x != y {
t.Errorf("%#v != %#v, wanted equal", x, y)
}
}
//go:noinline
func checkNe(t *testing.T, x, y any) {
// Make sure we don't inline the equality test.
if x == y {
t.Errorf("%#v == %#v, wanted not equal", x, y)
}
}
//go:noinline
func checkPanic(t *testing.T, x, y any) {
defer func() {
if recover() == nil {
t.Errorf("%#v == %#v didn't panic", x, y)
}
}()
_ = x == y
}
type fooComparable struct {
x int
}
func (f fooComparable) foo() {
}
type fooIncomparable struct {
b func()
}
func (i fooIncomparable) foo() {
}
type eqResult int
const (
eq eqResult = iota
ne
panic_
)
func (x eqResult) String() string {
return []string{eq: "eq", ne: "ne", panic_: "panic"}[x]
}
// testEq returns eq if x==y, ne if x!=y, or panic_ if the comparison panics.
func testEq(x, y any) (r eqResult) {
defer func() {
if e := recover(); e != nil {
r = panic_
}
}()
r = ne
if x == y {
r = eq
}
return
}
// testCompare make two instances of struct type typ, then
// assigns its len(vals) fields one value from each slice in vals.
// Then it checks the results against a "manual" comparison field
// by field.
func testCompare(t *testing.T, typ reflect.Type, vals [][]any) {
if len(vals) != typ.NumField() {
t.Fatalf("bad test, have %d fields in the list, but %d fields in the type", len(vals), typ.NumField())
}
x := reflect.New(typ).Elem()
y := reflect.New(typ).Elem()
ps := powerSet(vals) // all possible settings of fields of the test type.
for _, xf := range ps { // Pick fields for x
for _, yf := range ps { // Pick fields for y
// Make x and y from their chosen fields.
for i, f := range xf {
x.Field(i).Set(reflect.ValueOf(f))
}
for i, f := range yf {
y.Field(i).Set(reflect.ValueOf(f))
}
// Compute what we want the result to be.
want := eq
for i := range len(vals) {
if c := testEq(xf[i], yf[i]); c != eq {
want = c
break
}
}
// Compute actual result using generated equality function.
got := testEq(x.Interface(), y.Interface())
if got != want {
t.Errorf("%#v == %#v, got %s want %s\n", x, y, got, want)
}
}
}
}
// powerset returns all possible sequences of choosing one
// element from each entry in s.
// For instance, if s = {{1,2}, {a,b}}, then
// it returns {{1,a},{1,b},{2,a},{2,b}}.
func powerSet(s [][]any) [][]any {
if len(s) == 0 {
return [][]any{{}}
}
p := powerSet(s[:len(s)-1]) // powerset from first len(s)-1 entries
var r [][]any
for _, head := range p {
// add one more entry.
for _, v := range s[len(s)-1] {
x := make([]any, 0, len(s))
x = append(x, head...)
x = append(x, v)
r = append(r, x)
}
}
return r
}
func TestCompareKinds1(t *testing.T) {
type S struct {
X0 int8
X1 int16
X2 int32
X3 int64
X4 float32
X5 float64
}
testCompare(t, reflect.TypeOf(S{}), [][]any{
{int8(0), int8(1)},
{int16(0), int16(1), int16(1 << 14)},
{int32(0), int32(1), int32(1 << 30)},
{int64(0), int64(1), int64(1 << 62)},
{float32(0), float32(1.0)},
{0.0, 1.0},
})
}
func TestCompareKinds2(t *testing.T) {
type S struct {
X0 uint8
X1 uint16
X2 uint32
X3 uint64
X4 uintptr
X5 bool
}
testCompare(t, reflect.TypeOf(S{}), [][]any{
{uint8(0), uint8(1)},
{uint16(0), uint16(1), uint16(1 << 15)},
{uint32(0), uint32(1), uint32(1 << 31)},
{uint64(0), uint64(1), uint64(1 << 63)},
{uintptr(0), uintptr(1)},
{false, true},
})
}
func TestCompareKinds3(t *testing.T) {
type S struct {
X0 complex64
X1 complex128
X2 *byte
X3 chan int
X4 unsafe.Pointer
}
testCompare(t, reflect.TypeOf(S{}), [][]any{
{complex64(1 + 1i), complex64(1 + 2i), complex64(2 + 1i)},
{complex128(1 + 1i), complex128(1 + 2i), complex128(2 + 1i)},
{new(byte), new(byte)},
{make(chan int), make(chan int)},
{unsafe.Pointer(new(byte)), unsafe.Pointer(new(byte))},
})
}
func TestCompareOrdering(t *testing.T) {
type S struct {
A string
E any
B string
}
testCompare(t, reflect.TypeOf(S{}), [][]any{
{"a", "b", "cc"},
{3, []byte{0}, []byte{1}},
{"a", "b", "cc"},
})
}
func TestCompareInterfaces(t *testing.T) {
type S struct {
A any
B fooer
}
testCompare(t, reflect.TypeOf(S{}), [][]any{
{3, []byte{0}},
{fooComparable{x: 3}, fooIncomparable{b: nil}},
})
}
func TestCompareSkip(t *testing.T) {
type S struct {
A int8
B int16
}
type S2 struct {
A int8
padding int8
B int16
}
x := S{A: 1, B: 3}
y := S{A: 1, B: 3}
(*S2)(unsafe.Pointer(&x)).padding = 88
(*S2)(unsafe.Pointer(&y)).padding = 99
want := eq
if got := testEq(x, y); got != want {
t.Errorf("%#v == %#v, got %s want %s", x, y, got, want)
}
}
func TestCompareMemequal(t *testing.T) {
type S struct {
s1 string
d [100]byte
s2 string
}
var x, y S
checkEq(t, x, y)
y.d[0] = 1
checkNe(t, x, y)
y.d[0] = 0
y.d[99] = 1
checkNe(t, x, y)
}
func TestComparePanic(t *testing.T) {
type S struct {
X0 string
X1 any
X2 string
X3 fooer
X4 string
}
testCompare(t, reflect.TypeOf(S{}), [][]any{
{"a", "b", "cc"}, // length equal, as well as length unequal
{3, []byte{1}}, // comparable and incomparable
{"a", "b", "cc"}, // length equal, as well as length unequal
{fooComparable{x: 3}, fooIncomparable{b: nil}}, // comparable and incomparable
{"a", "b", "cc"}, // length equal, as well as length unequal
})
}
func TestCompareArray(t *testing.T) {
type S struct {
X0 string
X1 [100]string
X2 string
}
x := S{X0: "a", X2: "b"}
y := x
checkEq(t, x, y)
x.X0 = "c"
checkNe(t, x, y)
x.X0 = "a"
x.X2 = "c"
checkNe(t, x, y)
x.X2 = "b"
checkEq(t, x, y)
for i := 0; i < 100; i++ {
x.X1[i] = "d"
checkNe(t, x, y)
y.X1[i] = "e"
checkNe(t, x, y)
x.X1[i] = ""
y.X1[i] = ""
checkEq(t, x, y)
}
}
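The panic-related helpers above (checkPanic, TestComparePanic) rely on the language rule that comparing interface values panics when the dynamic types are identical but incomparable. A minimal standalone illustration, independent of the generated equality functions:

package p

// Comparing x and y panics at run time ("comparing uncomparable type []uint8") because
// both interfaces hold a []byte, for which == is not defined.
func willPanic() bool {
	var x, y any = []byte{1}, []byte{1}
	return x == y
}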


@ -0,0 +1,55 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package test
import (
"internal/asan"
"internal/goexperiment"
"internal/msan"
"internal/race"
"testing"
)
func TestFreeAppendAllocations(t *testing.T) {
t.Run("slice-no-alias", func(t *testing.T) {
if !goexperiment.RuntimeFreegc {
t.Skip("skipping allocation test when runtime.freegc is disabled")
}
if race.Enabled || msan.Enabled || asan.Enabled {
// TODO(thepudds): we get 8 allocs for slice-no-alias instead of 1 with -race. This
// might be expected given some allocation optimizations are already disabled
// under race, but if not, we might need to update walk.
t.Skip("skipping allocation test under race detector and other sanitizers")
}
allocs := testing.AllocsPerRun(100, func() {
var s []int64
for i := range 100 {
s = append(s, int64(i))
}
_ = s
})
t.Logf("allocs: %v", allocs)
if allocs != 1 {
t.Errorf("allocs: %v, want 1", allocs)
}
})
t.Run("slice-aliased", func(t *testing.T) {
allocs := testing.AllocsPerRun(100, func() {
var s []int64
var alias []int64
for i := range 100 {
s = append(s, int64(i))
alias = s
}
_ = alias
})
t.Logf("allocs: %v", allocs)
if allocs < 2 {
t.Errorf("allocs: %v, want >= 2", allocs)
}
})
}


@@ -57,6 +57,7 @@ func printuint(uint64)
func printcomplex128(complex128)
func printcomplex64(complex64)
func printstring(string)
+ func printquoted(string)
func printpointer(any)
func printuintptr(uintptr)
func printiface(any)
@@ -196,6 +197,8 @@ func makeslice64(typ *byte, len int64, cap int64) unsafe.Pointer
func makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
func growslice(oldPtr *any, newLen, oldCap, num int, et *byte) (ary []any)
func growsliceBuf(oldPtr *any, newLen, oldCap, num int, et *byte, buf *any, bufLen int) (ary []any)
+ func growsliceBufNoAlias(oldPtr *any, newLen, oldCap, num int, et *byte, buf *any, bufLen int) (ary []any)
+ func growsliceNoAlias(oldPtr *any, newLen, oldCap, num int, et *byte) (ary []any)
func unsafeslicecheckptr(typ *byte, ptr unsafe.Pointer, len int64)
func panicunsafeslicelen()
func panicunsafeslicenilptr()


@@ -64,6 +64,7 @@ var runtimeDecls = [...]struct {
{"printcomplex128", funcTag, 27},
{"printcomplex64", funcTag, 29},
{"printstring", funcTag, 31},
+ {"printquoted", funcTag, 31},
{"printpointer", funcTag, 32},
{"printuintptr", funcTag, 33},
{"printiface", funcTag, 32},
@@ -161,6 +162,8 @@ var runtimeDecls = [...]struct {
{"makeslicecopy", funcTag, 125},
{"growslice", funcTag, 127},
{"growsliceBuf", funcTag, 128},
+ {"growsliceBufNoAlias", funcTag, 128},
+ {"growsliceNoAlias", funcTag, 127},
{"unsafeslicecheckptr", funcTag, 129},
{"panicunsafeslicelen", funcTag, 9},
{"panicunsafeslicenilptr", funcTag, 9},


@@ -669,7 +669,7 @@ var cgoPrefixes = [...]string{
"_Cmacro_", // function to evaluate the expanded expression
}
- func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, def *TypeName, wantType bool) {
+ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, wantType bool) {
// these must be declared before the "goto Error" statements
var (
obj Object
@@ -715,7 +715,7 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, def *TypeName
}
goto Error
}
- check.objDecl(exp, nil)
+ check.objDecl(exp)
} else {
exp = pkg.scope.Lookup(sel)
if exp == nil {
@@ -777,12 +777,6 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, def *TypeName
check.exprOrType(x, e.X, false)
switch x.mode {
- case typexpr:
- // don't crash for "type T T.x" (was go.dev/issue/51509)
- if def != nil && def.typ == x.typ {
- check.cycleError([]Object{def}, 0)
- goto Error
- }
case builtin:
check.errorf(e.Pos(), UncalledBuiltin, "invalid use of %s in selector expression", x)
goto Error
@@ -844,7 +838,7 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, def *TypeName
// methods may not have a fully set up signature yet
if m, _ := obj.(*Func); m != nil {
- check.objDecl(m, nil)
+ check.objDecl(m)
}
if x.mode == typexpr {


@@ -45,8 +45,7 @@ func pathString(path []Object) string {
}
// objDecl type-checks the declaration of obj in its respective (file) environment.
- // For the meaning of def, see Checker.definedType, in typexpr.go.
- func (check *Checker) objDecl(obj Object, def *TypeName) {
+ func (check *Checker) objDecl(obj Object) {
if tracePos {
check.pushPos(obj.Pos())
defer func() {
@@ -156,7 +155,7 @@ func (check *Checker) objDecl(obj Object, def *TypeName) {
check.varDecl(obj, d.lhs, d.vtyp, d.init)
case *TypeName:
// invalid recursive types are detected via path
- check.typeDecl(obj, d.tdecl, def)
+ check.typeDecl(obj, d.tdecl)
check.collectMethods(obj) // methods can only be added to top-level types
case *Func:
// functions may be recursive - no need to track dependencies
@@ -440,7 +439,7 @@ func (check *Checker) isImportedConstraint(typ Type) bool {
return u != nil && !u.IsMethodSet()
}
- func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *TypeName) {
+ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl) {
assert(obj.typ == nil)
// Only report a version error if we have not reported one already.
@@ -474,7 +473,6 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *TypeN
if check.conf.EnableAlias {
alias := check.newAlias(obj, nil)
- setDefType(def, alias)
// If we could not type the RHS, set it to invalid. This should
// only ever happen if we panic before setting.
@@ -521,7 +519,6 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *TypeN
}
named := check.newNamed(obj, nil, nil)
- setDefType(def, named)
// The RHS of a named N can be nil if, for example, N is defined as a cycle of aliases with
// gotypesalias=0. Consider:
@@ -878,7 +875,7 @@ func (check *Checker) declStmt(list []syntax.Decl) {
scopePos := s.Name.Pos()
check.declare(check.scope, s.Name, obj, scopePos)
check.push(obj) // mark as grey
- check.typeDecl(obj, s, nil)
+ check.typeDecl(obj, s)
check.pop()
default:


@@ -993,6 +993,13 @@ func (check *Checker) rawExpr(T *target, x *operand, e syntax.Expr, hint Type, a
check.nonGeneric(T, x)
}
+ // Here, x is a value, meaning it has a type. If that type is pending, then we have
+ // a cycle. As an example:
+ //
+ // type T [unsafe.Sizeof(T{})]int
+ //
+ // has a cycle T->T which is deemed valid (by decl.go), but which is in fact invalid.
+ check.pendingType(x)
check.record(x)
return kind
@@ -1027,6 +1034,22 @@ func (check *Checker) nonGeneric(T *target, x *operand) {
}
}
+ // If x has a pending type (i.e. its declaring object is on the object path), pendingType
+ // reports an error and invalidates x.mode and x.typ.
+ // Otherwise it leaves x alone.
+ func (check *Checker) pendingType(x *operand) {
+ if x.mode == invalid || x.mode == novalue {
+ return
+ }
+ if n, ok := Unalias(x.typ).(*Named); ok {
+ if i, ok := check.objPathIdx[n.obj]; ok {
+ check.cycleError(check.objPath, i)
+ x.mode = invalid
+ x.typ = Typ[Invalid]
+ }
+ }
+ }
// exprInternal contains the core of type checking of expressions.
// Must only be called by rawExpr.
// (See rawExpr for an explanation of the parameters.)
@@ -1044,7 +1067,7 @@ func (check *Checker) exprInternal(T *target, x *operand, e syntax.Expr, hint Ty
goto Error // error was reported before
case *syntax.Name:
- check.ident(x, e, nil, false)
+ check.ident(x, e, false)
case *syntax.DotsType:
// dots are handled explicitly where they are valid
@@ -1079,7 +1102,7 @@ func (check *Checker) exprInternal(T *target, x *operand, e syntax.Expr, hint Ty
return kind
case *syntax.SelectorExpr:
- check.selector(x, e, nil, false)
+ check.selector(x, e, false)
case *syntax.IndexExpr:
if check.indexExpr(x, e) {
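The example mentioned in the new comment, written out as a standalone program; it is intentionally invalid, which is what the pendingType check is meant to report:

package p

import "unsafe"

// The array length refers to T itself through the composite literal T{}, creating a
// T -> T cycle. With the new check this is flagged as an invalid cycle rather than
// relying on the per-kind guards removed from the composite-literal handling below.
type T [unsafe.Sizeof(T{})]int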


@@ -23,6 +23,17 @@ func sprintf(qf Qualifier, tpSubscripts bool, format string, args ...any) string
panic("got operand instead of *operand")
case *operand:
arg = operandString(a, qf)
+ case []*operand:
+ var buf strings.Builder
+ buf.WriteByte('[')
+ for i, x := range a {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(operandString(x, qf))
+ }
+ buf.WriteByte(']')
+ arg = buf.String()
case syntax.Pos:
arg = a.String()
case syntax.Expr:


@@ -145,13 +145,6 @@ func (check *Checker) compositeLit(x *operand, e *syntax.CompositeLit, hint Type
switch u, _ := commonUnder(base, nil); utyp := u.(type) {
case *Struct:
- // Prevent crash if the struct referred to is not yet set up.
- // See analogous comment for *Array.
- if utyp.fields == nil {
- check.error(e, InvalidTypeCycle, "invalid recursive type")
- x.mode = invalid
- return
- }
if len(e.ElemList) == 0 {
break
}
@@ -225,14 +218,6 @@ func (check *Checker) compositeLit(x *operand, e *syntax.CompositeLit, hint Type
}
case *Array:
- // Prevent crash if the array referred to is not yet set up. Was go.dev/issue/18643.
- // This is a stop-gap solution. Should use Checker.objPath to report entire
- // path starting with earliest declaration in the source. TODO(gri) fix this.
- if utyp.elem == nil {
- check.error(e, InvalidTypeCycle, "invalid recursive type")
- x.mode = invalid
- return
- }
n := check.indexedElts(e.ElemList, utyp.elem, utyp.len)
// If we have an array of unknown length (usually [...]T arrays, but also
// arrays [n]T where n is invalid) set the length now that we know it and
@@ -254,23 +239,9 @@ func (check *Checker) compositeLit(x *operand, e *syntax.CompositeLit, hint Type
}
case *Slice:
- // Prevent crash if the slice referred to is not yet set up.
- // See analogous comment for *Array.
- if utyp.elem == nil {
- check.error(e, InvalidTypeCycle, "invalid recursive type")
- x.mode = invalid
- return
- }
check.indexedElts(e.ElemList, utyp.elem, -1)
case *Map:
- // Prevent crash if the map referred to is not yet set up.
- // See analogous comment for *Array.
- if utyp.key == nil || utyp.elem == nil {
- check.error(e, InvalidTypeCycle, "invalid recursive type")
- x.mode = invalid
- return
- }
// If the map key type is an interface (but not a type parameter),
// the type of a constant key must be considered when checking for
// duplicates.


@@ -447,7 +447,7 @@ func (check *Checker) missingMethod(V, T Type, static bool, equivalent func(x, y
// methods may not have a fully set up signature yet
if check != nil {
- check.objDecl(f, nil)
+ check.objDecl(f)
}
if !equivalent(f.typ, m.typ) {
@@ -466,7 +466,7 @@ func (check *Checker) missingMethod(V, T Type, static bool, equivalent func(x, y
}
// This method may be formatted in funcString below, so must have a fully
// set up signature.
if check != nil {
- check.objDecl(f, nil)
+ check.objDecl(f)
}
}
switch state {
